problem_id (string, lengths 18–22) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, lengths 13–58) | prompt (string, lengths 1.71k–18.9k) | golden_diff (string, lengths 145–5.13k) | verification_info (string, lengths 465–23.6k) | num_tokens_prompt (int64, 556–4.1k) | num_tokens_diff (int64, 47–1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_3528 | rasdani/github-patches | git_diff | pytorch__examples-1109 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
word Language Model bug
self.decoder = nn.Linear(**ninp**, ntoken) in model.py line 124 shoud be "nhid"
</issue>
<code>
[start of word_language_model/model.py]
1 import math
2 import torch
3 import torch.nn as nn
4 import torch.nn.functional as F
5
6 class RNNModel(nn.Module):
7 """Container module with an encoder, a recurrent module, and a decoder."""
8
9 def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
10 super(RNNModel, self).__init__()
11 self.ntoken = ntoken
12 self.drop = nn.Dropout(dropout)
13 self.encoder = nn.Embedding(ntoken, ninp)
14 if rnn_type in ['LSTM', 'GRU']:
15 self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
16 else:
17 try:
18 nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
19 except KeyError as e:
20 raise ValueError( """An invalid option for `--model` was supplied,
21 options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""") from e
22 self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
23 self.decoder = nn.Linear(nhid, ntoken)
24
25 # Optionally tie weights as in:
26 # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
27 # https://arxiv.org/abs/1608.05859
28 # and
29 # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
30 # https://arxiv.org/abs/1611.01462
31 if tie_weights:
32 if nhid != ninp:
33 raise ValueError('When using the tied flag, nhid must be equal to emsize')
34 self.decoder.weight = self.encoder.weight
35
36 self.init_weights()
37
38 self.rnn_type = rnn_type
39 self.nhid = nhid
40 self.nlayers = nlayers
41
42 def init_weights(self):
43 initrange = 0.1
44 nn.init.uniform_(self.encoder.weight, -initrange, initrange)
45 nn.init.zeros_(self.decoder.bias)
46 nn.init.uniform_(self.decoder.weight, -initrange, initrange)
47
48 def forward(self, input, hidden):
49 emb = self.drop(self.encoder(input))
50 output, hidden = self.rnn(emb, hidden)
51 output = self.drop(output)
52 decoded = self.decoder(output)
53 decoded = decoded.view(-1, self.ntoken)
54 return F.log_softmax(decoded, dim=1), hidden
55
56 def init_hidden(self, bsz):
57 weight = next(self.parameters())
58 if self.rnn_type == 'LSTM':
59 return (weight.new_zeros(self.nlayers, bsz, self.nhid),
60 weight.new_zeros(self.nlayers, bsz, self.nhid))
61 else:
62 return weight.new_zeros(self.nlayers, bsz, self.nhid)
63
64 # Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
65 class PositionalEncoding(nn.Module):
66 r"""Inject some information about the relative or absolute position of the tokens in the sequence.
67 The positional encodings have the same dimension as the embeddings, so that the two can be summed.
68 Here, we use sine and cosine functions of different frequencies.
69 .. math:
70 \text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
71 \text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))
72 \text{where pos is the word position and i is the embed idx)
73 Args:
74 d_model: the embed dim (required).
75 dropout: the dropout value (default=0.1).
76 max_len: the max. length of the incoming sequence (default=5000).
77 Examples:
78 >>> pos_encoder = PositionalEncoding(d_model)
79 """
80
81 def __init__(self, d_model, dropout=0.1, max_len=5000):
82 super(PositionalEncoding, self).__init__()
83 self.dropout = nn.Dropout(p=dropout)
84
85 pe = torch.zeros(max_len, d_model)
86 position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
87 div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
88 pe[:, 0::2] = torch.sin(position * div_term)
89 pe[:, 1::2] = torch.cos(position * div_term)
90 pe = pe.unsqueeze(0).transpose(0, 1)
91 self.register_buffer('pe', pe)
92
93 def forward(self, x):
94 r"""Inputs of forward function
95 Args:
96 x: the sequence fed to the positional encoder model (required).
97 Shape:
98 x: [sequence length, batch size, embed dim]
99 output: [sequence length, batch size, embed dim]
100 Examples:
101 >>> output = pos_encoder(x)
102 """
103
104 x = x + self.pe[:x.size(0), :]
105 return self.dropout(x)
106
107 class TransformerModel(nn.Module):
108 """Container module with an encoder, a recurrent or transformer module, and a decoder."""
109
110 def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
111 super(TransformerModel, self).__init__()
112 try:
113 from torch.nn import TransformerEncoder, TransformerEncoderLayer
114 except BaseException as e:
115 raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or '
116 'lower.') from e
117 self.model_type = 'Transformer'
118 self.src_mask = None
119 self.pos_encoder = PositionalEncoding(ninp, dropout)
120 encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
121 self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
122 self.encoder = nn.Embedding(ntoken, ninp)
123 self.ninp = ninp
124 self.decoder = nn.Linear(ninp, ntoken)
125
126 self.init_weights()
127
128 def _generate_square_subsequent_mask(self, sz):
129 mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
130 mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
131 return mask
132
133 def init_weights(self):
134 initrange = 0.1
135 nn.init.uniform_(self.encoder.weight, -initrange, initrange)
136 nn.init.zeros_(self.decoder.bias)
137 nn.init.uniform_(self.decoder.weight, -initrange, initrange)
138
139 def forward(self, src, has_mask=True):
140 if has_mask:
141 device = src.device
142 if self.src_mask is None or self.src_mask.size(0) != len(src):
143 mask = self._generate_square_subsequent_mask(len(src)).to(device)
144 self.src_mask = mask
145 else:
146 self.src_mask = None
147
148 src = self.encoder(src) * math.sqrt(self.ninp)
149 src = self.pos_encoder(src)
150 output = self.transformer_encoder(src, self.src_mask)
151 output = self.decoder(output)
152 return F.log_softmax(output, dim=-1)
153
[end of word_language_model/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/word_language_model/model.py b/word_language_model/model.py
--- a/word_language_model/model.py
+++ b/word_language_model/model.py
@@ -121,7 +121,7 @@
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
- self.decoder = nn.Linear(ninp, ntoken)
+ self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
|
{"golden_diff": "diff --git a/word_language_model/model.py b/word_language_model/model.py\n--- a/word_language_model/model.py\n+++ b/word_language_model/model.py\n@@ -121,7 +121,7 @@\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n- self.decoder = nn.Linear(ninp, ntoken)\n+ self.decoder = nn.Linear(nhid, ntoken)\n \n self.init_weights()\n", "issue": "word Language Model bug\nself.decoder = nn.Linear(**ninp**, ntoken) in model.py line 124 shoud be \"nhid\" \n", "before_files": [{"content": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):\n super(RNNModel, self).__init__()\n self.ntoken = ntoken\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]\n except KeyError as e:\n raise ValueError( \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\") from e\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n if nhid != ninp:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.bias)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output)\n decoded = decoded.view(-1, self.ntoken)\n return F.log_softmax(decoded, dim=1), hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return (weight.new_zeros(self.nlayers, bsz, self.nhid),\n weight.new_zeros(self.nlayers, bsz, self.nhid))\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.\nclass PositionalEncoding(nn.Module):\n r\"\"\"Inject some information about the relative or absolute position of the tokens in the sequence.\n The positional encodings have the same dimension as the embeddings, so that the two can be summed.\n Here, we use sine and cosine functions of different frequencies.\n .. math:\n \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))\n \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))\n \\text{where pos is the word position and i is the embed idx)\n Args:\n d_model: the embed dim (required).\n dropout: the dropout value (default=0.1).\n max_len: the max. 
length of the incoming sequence (default=5000).\n Examples:\n >>> pos_encoder = PositionalEncoding(d_model)\n \"\"\"\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n r\"\"\"Inputs of forward function\n Args:\n x: the sequence fed to the positional encoder model (required).\n Shape:\n x: [sequence length, batch size, embed dim]\n output: [sequence length, batch size, embed dim]\n Examples:\n >>> output = pos_encoder(x)\n \"\"\"\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\nclass TransformerModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent or transformer module, and a decoder.\"\"\"\n\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\n super(TransformerModel, self).__init__()\n try:\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n except BaseException as e:\n raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or '\n 'lower.') from e\n self.model_type = 'Transformer'\n self.src_mask = None\n self.pos_encoder = PositionalEncoding(ninp, dropout)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.bias)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, src, has_mask=True):\n if has_mask:\n device = src.device\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n else:\n self.src_mask = None\n\n src = self.encoder(src) * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n output = self.transformer_encoder(src, self.src_mask)\n output = self.decoder(output)\n return F.log_softmax(output, dim=-1)\n", "path": "word_language_model/model.py"}]}
| 2,537 | 117 |
gh_patches_debug_35169
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1540
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Restore access to India-Gujarat (IN-GJ) wind and solar data
Hey everyone,
looks like SLDCGUJ has also removed the access to real-time data websites for wind and solar, just like they did for the conventional real-time data.
The solution should be really simple: We can use the same link which still gives access to that ["print" page](http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php). It has wind and solar generation on the right side, too ;)

Greetings, Alex
</issue>
<code>
[start of parsers/IN_GJ.py]
1 #!/usr/bin/env python3
2
3 import re
4 import collections
5 from operator import itemgetter
6 import arrow
7 import requests
8 import itertools
9 from .lib import zonekey, web, IN
10 from .lib.validation import validate
11 from logging import getLogger
12
13
14 station_map = {
15 "coal": ["Ukai(1-5)+Ukai6",
16 "Wanakbori",
17 "Gandhinagar",
18 "Sikka(3-4)",
19 "KLTPS(1-3)+KLTPS4",
20 "SLPP(I+II)",
21 "Akrimota",
22 "TPAECo",
23 "EPGL(I+II)",
24 "Adani(I+II+III)",
25 "BECL(I+II)",
26 "CGPL"],
27 "hydro": ["Ukai(Hydro)",
28 "Kadana(Hydro)",
29 "SSP(RBPH)"],
30 "gas": ["Utran(Gas)(II)",
31 "Dhuvaran(Gas)(I)+(II)+(III)",
32 "GIPCL(I)+(II)",
33 "GSEG(I+II)",
34 "GPPC",
35 "CLPI",
36 "KAWAS",
37 "Sugen+Unosgn",
38 "JHANOR"],
39 "nuclear": ["KAPP"]
40 }
41
42
43 def split_and_sum(expression):
44 """
45 Avoid using literal_eval for simple addition expressions.
46 Returns sum of all positive numbers.
47 """
48
49 split_vals = expression.split('+')
50 float_vals = [float(v) for v in split_vals]
51 total = sum([v for v in float_vals if v > 0.0])
52
53 return total
54
55
56 def fetch_data(zone_key, session=None, logger=None):
57 session = session or requests.session()
58
59 values = collections.Counter()
60 zonekey.assert_zone_key(zone_key, 'IN-GJ')
61
62 solar_html = web.get_response_soup(
63 zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)
64 wind_html = web.get_response_soup(
65 zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)
66
67 values['date'] = arrow.get(
68 solar_html.find_all('tr')[0].text.split('\t')[-1].strip()
69 + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime
70 values['solar'] = split_and_sum(
71 solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
72 values['wind'] = split_and_sum(
73 wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
74
75 cookies_params = {
76 'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',
77 'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'
78 }
79
80 rows = web.get_response_soup(
81 zone_key,
82 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',
83 session).find_all('tr')
84
85 for row in rows:
86 elements = row.find_all('td')
87 if len(elements) > 3: # will find production rows
88 v1, v2 = (re.sub(r'\s+', r'', x.text)
89 for x in itemgetter(*[0, 3])(elements))
90 energy_type = [k for k, v in station_map.items() if v1 in v]
91 if len(energy_type) > 0:
92 v2 = split_and_sum(v2)
93 values[energy_type[0]] += v2
94 else:
95 if 'StationName' in (v1, v2): # meta data row
96 continue
97 elif 'DSMRate' in v2: # demand side management
98 continue
99 else:
100 try:
101 logger.warning(
102 'Unknown fuel for station name: {}'.format(v1),
103 extra={'key': zone_key})
104 v2 = split_and_sum(v2)
105 values['unknown'] += v2
106 except ValueError as e:
107 # handle float failures
108 logger.warning(
109 "couldn't convert {} to float".format(v2),
110 extra={'key': zone_key})
111 continue
112 elif len(elements) == 3: # will find consumption row
113 v1, v2 = (re.sub(r'\s+', r'', x.text)
114 for x in itemgetter(*[0, 2])(elements))
115 if v1 == 'GujaratCatered':
116 values['total consumption'] = split_and_sum(v2.split('MW')[0])
117 elif len(elements) == 1:
118 # CGPL/KAPP/KAWAS/JHANOR plants have a different html structure.
119 plant_name = re.sub(r'\s+', r'', elements[0].text)
120 known_plants = itertools.chain.from_iterable(station_map.values())
121
122 if plant_name in known_plants:
123 energy_type = [k for k, v in station_map.items() if plant_name in v][0]
124 generation_tag = row.find_all_next("td")[3]
125 val = float(re.sub(r'\s+', r'', generation_tag.text))
126 if val > 0:
127 values[energy_type] += val
128 else:
129 if plant_name and plant_name != 'GMR':
130 # GMR is outside Gujarat, sometimes plant_name is ''
131 logger.warning(
132 'Unknown fuel for station name: {}'.format(plant_name),
133 extra={'key': zone_key})
134
135 return values
136
137
138 def fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,
139 logger=getLogger('IN-GJ')):
140 """
141 Requests the last known production mix (in MW) of a given country
142 Arguments:
143 zone_key: specifies which zone to get
144 session: request session passed in order to re-use an existing session
145 target_datetime: the datetime for which we want production data. If not provided, we should
146 default it to now. The provided target_datetime is timezone-aware in UTC.
147 logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically
148 Return:
149 A list of dictionaries in the form:
150 {
151 'zoneKey': 'FR',
152 'datetime': '2017-01-01T00:00:00Z',
153 'production': {
154 'biomass': 0.0,
155 'coal': 0.0,
156 'gas': 0.0,
157 'hydro': 0.0,
158 'nuclear': null,
159 'oil': 0.0,
160 'solar': 0.0,
161 'wind': 0.0,
162 'geothermal': 0.0,
163 'unknown': 0.0
164 },
165 'storage': {
166 'hydro': -10.0,
167 },
168 'source': 'mysource.com'
169 }
170 """
171 session = session or requests.session()
172 if target_datetime:
173 raise NotImplementedError(
174 'This parser is not yet able to parse past dates')
175
176 value_map = fetch_data(zone_key, session, logger=logger)
177
178 data = {
179 'zoneKey': zone_key,
180 'datetime': value_map['date'],
181 'production': {
182 'biomass': None,
183 'coal': value_map.get('coal', 0),
184 'gas': value_map.get('gas', 0),
185 'hydro': value_map.get('hydro', 0),
186 'nuclear': value_map.get('nuclear', 0),
187 'oil': None,
188 'solar': value_map.get('solar', 0),
189 'wind': value_map.get('wind', 0),
190 'geothermal': None,
191 'unknown': value_map.get('unknown', 0)
192 },
193 'storage': {
194 'hydro': None
195 },
196 'source': 'sldcguj.com',
197 }
198
199 valid_data = validate(data, logger, remove_negative=True, floor=7000)
200
201 return valid_data
202
203
204 def fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None,
205 logger=getLogger('IN-GJ')):
206 """
207 Method to get consumption data of Gujarat
208 :param zone_key:
209 :param session:
210 :return:
211 """
212 session = session or requests.session()
213 if target_datetime:
214 raise NotImplementedError(
215 'This parser is not yet able to parse past dates')
216
217 value_map = fetch_data(zone_key, session, logger=logger)
218
219 data = {
220 'zoneKey': zone_key,
221 'datetime': value_map['date'],
222 'consumption': value_map['total consumption'],
223 'source': 'sldcguj.com'
224 }
225
226 return data
227
228
229 if __name__ == '__main__':
230 session = requests.Session()
231 print(fetch_production('IN-GJ', session))
232 print(fetch_consumption('IN-GJ', session))
233
[end of parsers/IN_GJ.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/IN_GJ.py b/parsers/IN_GJ.py
--- a/parsers/IN_GJ.py
+++ b/parsers/IN_GJ.py
@@ -10,6 +10,7 @@
from .lib.validation import validate
from logging import getLogger
+SLDCGUJ_URL = 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php'
station_map = {
"coal": ["Ukai(1-5)+Ukai6",
@@ -59,29 +60,23 @@
values = collections.Counter()
zonekey.assert_zone_key(zone_key, 'IN-GJ')
- solar_html = web.get_response_soup(
- zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)
- wind_html = web.get_response_soup(
- zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)
-
- values['date'] = arrow.get(
- solar_html.find_all('tr')[0].text.split('\t')[-1].strip()
- + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime
- values['solar'] = split_and_sum(
- solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
- values['wind'] = split_and_sum(
- wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())
-
cookies_params = {
'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',
'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'
}
- rows = web.get_response_soup(
- zone_key,
- 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',
- session).find_all('tr')
+ soup = web.get_response_soup(zone_key, SLDCGUJ_URL, session)
+ rows = soup.find_all('tr')
+ cells = [c.text.strip() for c in soup.find_all('td')]
+ # get wind and solar values
+ values['date'] = arrow.get(cells[1], 'DD-MM-YYYY HH:mm:ss').replace(
+ tzinfo='Asia/Kolkata')
+ [wind_solar_index] = [i for i, c in enumerate(cells) if c == '(Wind+Solar) Generation']
+ value = cells[wind_solar_index + 1]
+ values['wind'], values['solar'] = [int(v) for v in value.split(' + ')]
+
+ # get other production values
for row in rows:
elements = row.find_all('td')
if len(elements) > 3: # will find production rows
@@ -177,7 +172,7 @@
data = {
'zoneKey': zone_key,
- 'datetime': value_map['date'],
+ 'datetime': value_map['date'].datetime,
'production': {
'biomass': None,
'coal': value_map.get('coal', 0),
|
{"golden_diff": "diff --git a/parsers/IN_GJ.py b/parsers/IN_GJ.py\n--- a/parsers/IN_GJ.py\n+++ b/parsers/IN_GJ.py\n@@ -10,6 +10,7 @@\n from .lib.validation import validate\n from logging import getLogger\n \n+SLDCGUJ_URL = 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php'\n \n station_map = {\n \"coal\": [\"Ukai(1-5)+Ukai6\",\n@@ -59,29 +60,23 @@\n values = collections.Counter()\n zonekey.assert_zone_key(zone_key, 'IN-GJ')\n \n- solar_html = web.get_response_soup(\n- zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)\n- wind_html = web.get_response_soup(\n- zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)\n-\n- values['date'] = arrow.get(\n- solar_html.find_all('tr')[0].text.split('\\t')[-1].strip()\n- + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime\n- values['solar'] = split_and_sum(\n- solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n- values['wind'] = split_and_sum(\n- wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n-\n cookies_params = {\n 'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',\n 'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'\n }\n \n- rows = web.get_response_soup(\n- zone_key,\n- 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',\n- session).find_all('tr')\n+ soup = web.get_response_soup(zone_key, SLDCGUJ_URL, session)\n+ rows = soup.find_all('tr')\n+ cells = [c.text.strip() for c in soup.find_all('td')]\n \n+ # get wind and solar values\n+ values['date'] = arrow.get(cells[1], 'DD-MM-YYYY HH:mm:ss').replace(\n+ tzinfo='Asia/Kolkata')\n+ [wind_solar_index] = [i for i, c in enumerate(cells) if c == '(Wind+Solar) Generation']\n+ value = cells[wind_solar_index + 1]\n+ values['wind'], values['solar'] = [int(v) for v in value.split(' + ')]\n+\n+ # get other production values\n for row in rows:\n elements = row.find_all('td')\n if len(elements) > 3: # will find production rows\n@@ -177,7 +172,7 @@\n \n data = {\n 'zoneKey': zone_key,\n- 'datetime': value_map['date'],\n+ 'datetime': value_map['date'].datetime,\n 'production': {\n 'biomass': None,\n 'coal': value_map.get('coal', 0),\n", "issue": "Restore access to India-Gujarat (IN-GJ) wind and solar data\nHey everyone,\r\n\r\nlooks like SLDCGUJ has also removed the access to real-time data websites for wind and solar, just like they did for the conventional real-time data.\r\nThe solution should be really simple: We can use the same link which still gives access to that [\"print\" page](http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php). 
It has wind and solar generation on the right side, too ;)\r\n\r\n\r\n\r\nGreetings, Alex\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport re\nimport collections\nfrom operator import itemgetter\nimport arrow\nimport requests\nimport itertools\nfrom .lib import zonekey, web, IN\nfrom .lib.validation import validate\nfrom logging import getLogger\n\n\nstation_map = {\n \"coal\": [\"Ukai(1-5)+Ukai6\",\n \"Wanakbori\",\n \"Gandhinagar\",\n \"Sikka(3-4)\",\n \"KLTPS(1-3)+KLTPS4\",\n \"SLPP(I+II)\",\n \"Akrimota\",\n \"TPAECo\",\n \"EPGL(I+II)\",\n \"Adani(I+II+III)\",\n \"BECL(I+II)\",\n \"CGPL\"],\n \"hydro\": [\"Ukai(Hydro)\",\n \"Kadana(Hydro)\",\n \"SSP(RBPH)\"],\n \"gas\": [\"Utran(Gas)(II)\",\n \"Dhuvaran(Gas)(I)+(II)+(III)\",\n \"GIPCL(I)+(II)\",\n \"GSEG(I+II)\",\n \"GPPC\",\n \"CLPI\",\n \"KAWAS\",\n \"Sugen+Unosgn\",\n \"JHANOR\"],\n \"nuclear\": [\"KAPP\"]\n}\n\n\ndef split_and_sum(expression):\n \"\"\"\n Avoid using literal_eval for simple addition expressions.\n Returns sum of all positive numbers.\n \"\"\"\n\n split_vals = expression.split('+')\n float_vals = [float(v) for v in split_vals]\n total = sum([v for v in float_vals if v > 0.0])\n\n return total\n\n\ndef fetch_data(zone_key, session=None, logger=None):\n session = session or requests.session()\n\n values = collections.Counter()\n zonekey.assert_zone_key(zone_key, 'IN-GJ')\n\n solar_html = web.get_response_soup(\n zone_key, 'https://www.sldcguj.com/RealTimeData/GujSolar.php', session)\n wind_html = web.get_response_soup(\n zone_key, 'https://www.sldcguj.com/RealTimeData/wind.php', session)\n\n values['date'] = arrow.get(\n solar_html.find_all('tr')[0].text.split('\\t')[-1].strip()\n + ' Asia/Kolkata', 'D-MM-YYYY H:mm:ss ZZZ').datetime\n values['solar'] = split_and_sum(\n solar_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n values['wind'] = split_and_sum(\n wind_html.find_all('tr')[-1].find_all('td')[-1].text.strip())\n\n cookies_params = {\n 'ASPSESSIONIDSUQQQTRD': 'ODMNNHADJFGCMLFFGFEMOGBL',\n 'PHPSESSID': 'a301jk6p1p8d50dduflceeg6l1'\n }\n\n rows = web.get_response_soup(\n zone_key,\n 'http://www.sldcguj.com/RealTimeData/PrintPage.php?page=realtimedemand.php',\n session).find_all('tr')\n\n for row in rows:\n elements = row.find_all('td')\n if len(elements) > 3: # will find production rows\n v1, v2 = (re.sub(r'\\s+', r'', x.text)\n for x in itemgetter(*[0, 3])(elements))\n energy_type = [k for k, v in station_map.items() if v1 in v]\n if len(energy_type) > 0:\n v2 = split_and_sum(v2)\n values[energy_type[0]] += v2\n else:\n if 'StationName' in (v1, v2): # meta data row\n continue\n elif 'DSMRate' in v2: # demand side management\n continue\n else:\n try:\n logger.warning(\n 'Unknown fuel for station name: {}'.format(v1),\n extra={'key': zone_key})\n v2 = split_and_sum(v2)\n values['unknown'] += v2\n except ValueError as e:\n # handle float failures\n logger.warning(\n \"couldn't convert {} to float\".format(v2),\n extra={'key': zone_key})\n continue\n elif len(elements) == 3: # will find consumption row\n v1, v2 = (re.sub(r'\\s+', r'', x.text)\n for x in itemgetter(*[0, 2])(elements))\n if v1 == 'GujaratCatered':\n values['total consumption'] = split_and_sum(v2.split('MW')[0])\n elif len(elements) == 1:\n # CGPL/KAPP/KAWAS/JHANOR plants have a different html structure.\n plant_name = re.sub(r'\\s+', r'', elements[0].text)\n known_plants = itertools.chain.from_iterable(station_map.values())\n\n if plant_name in known_plants:\n energy_type = [k for k, v in station_map.items() if plant_name in 
v][0]\n generation_tag = row.find_all_next(\"td\")[3]\n val = float(re.sub(r'\\s+', r'', generation_tag.text))\n if val > 0:\n values[energy_type] += val\n else:\n if plant_name and plant_name != 'GMR':\n # GMR is outside Gujarat, sometimes plant_name is ''\n logger.warning(\n 'Unknown fuel for station name: {}'.format(plant_name),\n extra={'key': zone_key})\n\n return values\n\n\ndef fetch_production(zone_key='IN-GJ', session=None, target_datetime=None,\n logger=getLogger('IN-GJ')):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key: specifies which zone to get\n session: request session passed in order to re-use an existing session\n target_datetime: the datetime for which we want production data. If not provided, we should\n default it to now. The provided target_datetime is timezone-aware in UTC.\n logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n session = session or requests.session()\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n\n value_map = fetch_data(zone_key, session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': value_map['date'],\n 'production': {\n 'biomass': None,\n 'coal': value_map.get('coal', 0),\n 'gas': value_map.get('gas', 0),\n 'hydro': value_map.get('hydro', 0),\n 'nuclear': value_map.get('nuclear', 0),\n 'oil': None,\n 'solar': value_map.get('solar', 0),\n 'wind': value_map.get('wind', 0),\n 'geothermal': None,\n 'unknown': value_map.get('unknown', 0)\n },\n 'storage': {\n 'hydro': None\n },\n 'source': 'sldcguj.com',\n }\n\n valid_data = validate(data, logger, remove_negative=True, floor=7000)\n\n return valid_data\n\n\ndef fetch_consumption(zone_key='IN-GJ', session=None, target_datetime=None,\n logger=getLogger('IN-GJ')):\n \"\"\"\n Method to get consumption data of Gujarat\n :param zone_key:\n :param session:\n :return:\n \"\"\"\n session = session or requests.session()\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n\n value_map = fetch_data(zone_key, session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': value_map['date'],\n 'consumption': value_map['total consumption'],\n 'source': 'sldcguj.com'\n }\n\n return data\n\n\nif __name__ == '__main__':\n session = requests.Session()\n print(fetch_production('IN-GJ', session))\n print(fetch_consumption('IN-GJ', session))\n", "path": "parsers/IN_GJ.py"}]}
| 3,290 | 737 |
gh_patches_debug_40191 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1268 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`nextret` keeps going after the binary segfaults
If the binary segfaults when running `nextret` (and potentially the other `next*` commands), it keeps trying to continue until you kill it:
```
Program received signal SIGSEGV, Segmentation fault.
0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca "%s", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c
ommon/vfscanf-internal.c:339
339 in ./stdio-common/vfscanf-internal.c
Program received signal SIGSEGV, Segmentation fault.
0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca "%s", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c
ommon/vfscanf-internal.c:339
339 in ./stdio-common/vfscanf-internal.c
Program received signal SIGSEGV, Segmentation fault.
0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca "%s", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c
ommon/vfscanf-internal.c:339
339 in ./stdio-common/vfscanf-internal.c
```
We should stop after the first segfault and return to the prompt.
</issue>
<code>
[start of pwndbg/gdblib/proc.py]
1 """
2 Provides values which would be available from /proc which
3 are not fulfilled by other modules and some process/gdb flow
4 related information.
5 """
6
7 import functools
8 import sys
9 from types import ModuleType
10 from typing import Any
11 from typing import Callable
12
13 import gdb
14
15 import pwndbg.gdblib.qemu
16 import pwndbg.lib.memoize
17
18
19 class module(ModuleType):
20 @property
21 def pid(self):
22 # QEMU usermode emulation always returns 42000 for some reason.
23 # In any case, we can't use the info.
24 if pwndbg.gdblib.qemu.is_qemu_usermode():
25 return pwndbg.gdblib.qemu.pid()
26
27 i = gdb.selected_inferior()
28 if i is not None:
29 return i.pid
30 return 0
31
32 @property
33 def tid(self):
34 if pwndbg.gdblib.qemu.is_qemu_usermode():
35 return pwndbg.gdblib.qemu.pid()
36
37 i = gdb.selected_thread()
38 if i is not None:
39 return i.ptid[1]
40
41 return self.pid
42
43 @property
44 def alive(self):
45 return gdb.selected_thread() is not None
46
47 @property
48 def thread_is_stopped(self):
49 """
50 This detects whether selected thread is stopped.
51 It is not stopped in situations when gdb is executing commands
52 that are attached to a breakpoint by `command` command.
53
54 For more info see issue #229 ( https://github.com/pwndbg/pwndbg/issues/299 )
55 :return: Whether gdb executes commands attached to bp with `command` command.
56 """
57 return gdb.selected_thread().is_stopped()
58
59 @property
60 def exe(self):
61 """
62 Returns the debugged file name.
63
64 On remote targets, this may be prefixed with "target:" string.
65 See this by executing those in two terminals:
66 1. gdbserver 127.0.0.1:1234 /bin/ls
67 2. gdb -ex "target remote :1234" -ex "pi pwndbg.gdblib.proc.exe"
68
69 If you need to process the debugged file use:
70 `pwndbg.gdblib.file.get_file(pwndbg.gdblib.proc.exe)`
71 """
72 return gdb.current_progspace().filename
73
74 def OnlyWhenRunning(self, func):
75 @functools.wraps(func)
76 def wrapper(*a, **kw):
77 if self.alive:
78 return func(*a, **kw)
79
80 return wrapper
81
82
83 OnlyWhenRunning: Callable[[Any], Any]
84 # To prevent garbage collection
85 tether = sys.modules[__name__]
86
87 sys.modules[__name__] = module(__name__, "")
88
[end of pwndbg/gdblib/proc.py]
[start of pwndbg/gdblib/next.py]
1 """
2 Commands for setting temporary breakpoints on the next
3 instruction of some type (call, branch, etc.)
4 """
5
6 import re
7
8 import capstone
9 import gdb
10
11 import pwndbg.disasm
12 import pwndbg.gdblib.events
13 import pwndbg.gdblib.proc
14 import pwndbg.gdblib.regs
15 from pwndbg.color import message
16
17 jumps = set((capstone.CS_GRP_CALL, capstone.CS_GRP_JUMP, capstone.CS_GRP_RET, capstone.CS_GRP_IRET))
18
19 interrupts = set((capstone.CS_GRP_INT,))
20
21
22 @pwndbg.gdblib.events.exit
23 def clear_temp_breaks():
24 if not pwndbg.gdblib.proc.alive:
25 breakpoints = gdb.breakpoints()
26 if breakpoints:
27 for bp in breakpoints:
28 if (
29 bp.temporary and not bp.visible
30 ): # visible is used instead of internal because older gdb's don't support internal
31 bp.delete()
32
33
34 def next_int(address=None):
35 """
36 If there is a syscall in the current basic black,
37 return the instruction of the one closest to $PC.
38
39 Otherwise, return None.
40 """
41 if address is None:
42 ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)
43 if not ins:
44 return None
45 address = ins.next
46
47 ins = pwndbg.disasm.one(address)
48 while ins:
49 if set(ins.groups) & jumps:
50 return None
51 if set(ins.groups) & interrupts:
52 return ins
53 ins = pwndbg.disasm.one(ins.next)
54
55 return None
56
57
58 def next_branch(address=None):
59 if address is None:
60 ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)
61 if not ins:
62 return None
63 address = ins.next
64
65 ins = pwndbg.disasm.one(address)
66 while ins:
67 if set(ins.groups) & jumps:
68 return ins
69 ins = pwndbg.disasm.one(ins.next)
70
71 return None
72
73
74 def break_next_branch(address=None):
75 ins = next_branch(address)
76
77 if ins:
78 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True)
79 gdb.execute("continue", from_tty=False, to_string=True)
80 return ins
81
82
83 def break_next_interrupt(address=None):
84 ins = next_int(address)
85
86 if ins:
87 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True)
88 gdb.execute("continue", from_tty=False, to_string=True)
89 return ins
90
91
92 def break_next_call(symbol_regex=None):
93 while pwndbg.gdblib.proc.alive:
94 ins = break_next_branch()
95
96 if not ins:
97 break
98
99 # continue if not a call
100 if capstone.CS_GRP_CALL not in ins.groups:
101 continue
102
103 # return call if we don't search for a symbol
104 if not symbol_regex:
105 return ins
106
107 # return call if we match target address
108 if ins.target_const and re.match("%s$" % symbol_regex, hex(ins.target)):
109 return ins
110
111 # return call if we match symbol name
112 if ins.symbol and re.match("%s$" % symbol_regex, ins.symbol):
113 return ins
114
115
116 def break_next_ret(address=None):
117 while pwndbg.gdblib.proc.alive:
118 ins = break_next_branch(address)
119
120 if not ins:
121 break
122
123 if capstone.CS_GRP_RET in ins.groups:
124 return ins
125
126
127 def break_on_program_code():
128 """
129 Breaks on next instruction that belongs to process' objfile code.
130 :return: True for success, False when process ended or when pc is at the code.
131 """
132 exe = pwndbg.gdblib.proc.exe
133 binary_exec_page_ranges = [
134 (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute
135 ]
136
137 pc = pwndbg.gdblib.regs.pc
138 for start, end in binary_exec_page_ranges:
139 if start <= pc < end:
140 print(message.error("The pc is already at the binary objfile code. Not stepping."))
141 return False
142
143 while pwndbg.gdblib.proc.alive:
144 gdb.execute("si", from_tty=False, to_string=False)
145
146 pc = pwndbg.gdblib.regs.pc
147 for start, end in binary_exec_page_ranges:
148 if start <= pc < end:
149 return True
150
151 return False
152
153
154 def break_on_next(address=None):
155 address = address or pwndbg.gdblib.regs.pc
156 ins = pwndbg.disasm.one(address)
157
158 gdb.Breakpoint("*%#x" % (ins.address + ins.size), temporary=True)
159 gdb.execute("continue", from_tty=False, to_string=True)
160
[end of pwndbg/gdblib/next.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/gdblib/next.py b/pwndbg/gdblib/next.py
--- a/pwndbg/gdblib/next.py
+++ b/pwndbg/gdblib/next.py
@@ -91,6 +91,10 @@
def break_next_call(symbol_regex=None):
while pwndbg.gdblib.proc.alive:
+ # Break on signal as it may be a segfault
+ if pwndbg.gdblib.proc.stopped_with_signal:
+ return
+
ins = break_next_branch()
if not ins:
@@ -115,6 +119,10 @@
def break_next_ret(address=None):
while pwndbg.gdblib.proc.alive:
+ # Break on signal as it may be a segfault
+ if pwndbg.gdblib.proc.stopped_with_signal:
+ return
+
ins = break_next_branch(address)
if not ins:
@@ -126,13 +134,14 @@
def break_on_program_code():
"""
- Breaks on next instruction that belongs to process' objfile code.
- :return: True for success, False when process ended or when pc is at the code.
+ Breaks on next instruction that belongs to process' objfile code
+
+ :return: True for success, False when process ended or when pc is not at the code or if a signal occurred
"""
exe = pwndbg.gdblib.proc.exe
- binary_exec_page_ranges = [
+ binary_exec_page_ranges = tuple(
(p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute
- ]
+ )
pc = pwndbg.gdblib.regs.pc
for start, end in binary_exec_page_ranges:
@@ -140,12 +149,18 @@
print(message.error("The pc is already at the binary objfile code. Not stepping."))
return False
- while pwndbg.gdblib.proc.alive:
- gdb.execute("si", from_tty=False, to_string=False)
+ proc = pwndbg.gdblib.proc
+ regs = pwndbg.gdblib.regs
+
+ while proc.alive:
+ # Break on signal as it may be a segfault
+ if proc.stopped_with_signal:
+ return False
+
+ o = gdb.execute("si", from_tty=False, to_string=True)
- pc = pwndbg.gdblib.regs.pc
for start, end in binary_exec_page_ranges:
- if start <= pc < end:
+ if start <= regs.pc < end:
return True
return False
diff --git a/pwndbg/gdblib/proc.py b/pwndbg/gdblib/proc.py
--- a/pwndbg/gdblib/proc.py
+++ b/pwndbg/gdblib/proc.py
@@ -42,6 +42,11 @@
@property
def alive(self):
+ """
+ Informs whether the process has a thread. However, note that it will
+ still return True for a segfaulted thread. To detect that, consider
+ using the `stopped_with_signal` method.
+ """
return gdb.selected_thread() is not None
@property
@@ -56,6 +61,15 @@
"""
return gdb.selected_thread().is_stopped()
+ @property
+ def stopped_with_signal(self) -> bool:
+ """
+ Returns whether the program has stopped with a signal
+
+ Can be used to detect segfaults (but will also detect other signals)
+ """
+ return "It stopped with signal " in gdb.execute("info program", to_string=True)
+
@property
def exe(self):
"""
|
{"golden_diff": "diff --git a/pwndbg/gdblib/next.py b/pwndbg/gdblib/next.py\n--- a/pwndbg/gdblib/next.py\n+++ b/pwndbg/gdblib/next.py\n@@ -91,6 +91,10 @@\n \n def break_next_call(symbol_regex=None):\n while pwndbg.gdblib.proc.alive:\n+ # Break on signal as it may be a segfault\n+ if pwndbg.gdblib.proc.stopped_with_signal:\n+ return\n+\n ins = break_next_branch()\n \n if not ins:\n@@ -115,6 +119,10 @@\n \n def break_next_ret(address=None):\n while pwndbg.gdblib.proc.alive:\n+ # Break on signal as it may be a segfault\n+ if pwndbg.gdblib.proc.stopped_with_signal:\n+ return\n+\n ins = break_next_branch(address)\n \n if not ins:\n@@ -126,13 +134,14 @@\n \n def break_on_program_code():\n \"\"\"\n- Breaks on next instruction that belongs to process' objfile code.\n- :return: True for success, False when process ended or when pc is at the code.\n+ Breaks on next instruction that belongs to process' objfile code\n+\n+ :return: True for success, False when process ended or when pc is not at the code or if a signal occurred\n \"\"\"\n exe = pwndbg.gdblib.proc.exe\n- binary_exec_page_ranges = [\n+ binary_exec_page_ranges = tuple(\n (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute\n- ]\n+ )\n \n pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n@@ -140,12 +149,18 @@\n print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n return False\n \n- while pwndbg.gdblib.proc.alive:\n- gdb.execute(\"si\", from_tty=False, to_string=False)\n+ proc = pwndbg.gdblib.proc\n+ regs = pwndbg.gdblib.regs\n+\n+ while proc.alive:\n+ # Break on signal as it may be a segfault\n+ if proc.stopped_with_signal:\n+ return False\n+\n+ o = gdb.execute(\"si\", from_tty=False, to_string=True)\n \n- pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n- if start <= pc < end:\n+ if start <= regs.pc < end:\n return True\n \n return False\ndiff --git a/pwndbg/gdblib/proc.py b/pwndbg/gdblib/proc.py\n--- a/pwndbg/gdblib/proc.py\n+++ b/pwndbg/gdblib/proc.py\n@@ -42,6 +42,11 @@\n \n @property\n def alive(self):\n+ \"\"\"\n+ Informs whether the process has a thread. However, note that it will\n+ still return True for a segfaulted thread. 
To detect that, consider\n+ using the `stopped_with_signal` method.\n+ \"\"\"\n return gdb.selected_thread() is not None\n \n @property\n@@ -56,6 +61,15 @@\n \"\"\"\n return gdb.selected_thread().is_stopped()\n \n+ @property\n+ def stopped_with_signal(self) -> bool:\n+ \"\"\"\n+ Returns whether the program has stopped with a signal\n+\n+ Can be used to detect segfaults (but will also detect other signals)\n+ \"\"\"\n+ return \"It stopped with signal \" in gdb.execute(\"info program\", to_string=True)\n+\n @property\n def exe(self):\n \"\"\"\n", "issue": "`nextret` keeps going after the binary segfaults\nIf the binary segfaults when running `nextret` (and potentially the other `next*` commands), it keeps trying to continue until you kill it:\r\n```\r\nProgram received signal SIGSEGV, Segmentation fault.\r\n0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca \"%s\", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c\r\nommon/vfscanf-internal.c:339\r\n339 in ./stdio-common/vfscanf-internal.c\r\n\r\nProgram received signal SIGSEGV, Segmentation fault.\r\n0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca \"%s\", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c\r\nommon/vfscanf-internal.c:339\r\n339 in ./stdio-common/vfscanf-internal.c\r\n\r\nProgram received signal SIGSEGV, Segmentation fault.\r\n0x00007f7a8b4f9ae4 in __vfscanf_internal (s=0x7f7a8b6b0aa0 <_IO_2_1_stdin_>, format=0x4020ca \"%s\", argptr=argptr@entry=0x7ffeb13483a8, mode_flags=mode_flags@entry=2) at ./stdio-c\r\nommon/vfscanf-internal.c:339\r\n339 in ./stdio-common/vfscanf-internal.c\r\n```\r\n\r\nWe should stop after the first segfault and return to the prompt.\n", "before_files": [{"content": "\"\"\"\nProvides values which would be available from /proc which\nare not fulfilled by other modules and some process/gdb flow\nrelated information.\n\"\"\"\n\nimport functools\nimport sys\nfrom types import ModuleType\nfrom typing import Any\nfrom typing import Callable\n\nimport gdb\n\nimport pwndbg.gdblib.qemu\nimport pwndbg.lib.memoize\n\n\nclass module(ModuleType):\n @property\n def pid(self):\n # QEMU usermode emulation always returns 42000 for some reason.\n # In any case, we can't use the info.\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_inferior()\n if i is not None:\n return i.pid\n return 0\n\n @property\n def tid(self):\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_thread()\n if i is not None:\n return i.ptid[1]\n\n return self.pid\n\n @property\n def alive(self):\n return gdb.selected_thread() is not None\n\n @property\n def thread_is_stopped(self):\n \"\"\"\n This detects whether selected thread is stopped.\n It is not stopped in situations when gdb is executing commands\n that are attached to a breakpoint by `command` command.\n\n For more info see issue #229 ( https://github.com/pwndbg/pwndbg/issues/299 )\n :return: Whether gdb executes commands attached to bp with `command` command.\n \"\"\"\n return gdb.selected_thread().is_stopped()\n\n @property\n def exe(self):\n \"\"\"\n Returns the debugged file name.\n\n On remote targets, this may be prefixed with \"target:\" string.\n See this by executing those in two terminals:\n 1. gdbserver 127.0.0.1:1234 /bin/ls\n 2. 
gdb -ex \"target remote :1234\" -ex \"pi pwndbg.gdblib.proc.exe\"\n\n If you need to process the debugged file use:\n `pwndbg.gdblib.file.get_file(pwndbg.gdblib.proc.exe)`\n \"\"\"\n return gdb.current_progspace().filename\n\n def OnlyWhenRunning(self, func):\n @functools.wraps(func)\n def wrapper(*a, **kw):\n if self.alive:\n return func(*a, **kw)\n\n return wrapper\n\n\nOnlyWhenRunning: Callable[[Any], Any]\n# To prevent garbage collection\ntether = sys.modules[__name__]\n\nsys.modules[__name__] = module(__name__, \"\")\n", "path": "pwndbg/gdblib/proc.py"}, {"content": "\"\"\"\nCommands for setting temporary breakpoints on the next\ninstruction of some type (call, branch, etc.)\n\"\"\"\n\nimport re\n\nimport capstone\nimport gdb\n\nimport pwndbg.disasm\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.proc\nimport pwndbg.gdblib.regs\nfrom pwndbg.color import message\n\njumps = set((capstone.CS_GRP_CALL, capstone.CS_GRP_JUMP, capstone.CS_GRP_RET, capstone.CS_GRP_IRET))\n\ninterrupts = set((capstone.CS_GRP_INT,))\n\n\[email protected]\ndef clear_temp_breaks():\n if not pwndbg.gdblib.proc.alive:\n breakpoints = gdb.breakpoints()\n if breakpoints:\n for bp in breakpoints:\n if (\n bp.temporary and not bp.visible\n ): # visible is used instead of internal because older gdb's don't support internal\n bp.delete()\n\n\ndef next_int(address=None):\n \"\"\"\n If there is a syscall in the current basic black,\n return the instruction of the one closest to $PC.\n\n Otherwise, return None.\n \"\"\"\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return None\n if set(ins.groups) & interrupts:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef next_branch(address=None):\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef break_next_branch(address=None):\n ins = next_branch(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_interrupt(address=None):\n ins = next_int(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_call(symbol_regex=None):\n while pwndbg.gdblib.proc.alive:\n ins = break_next_branch()\n\n if not ins:\n break\n\n # continue if not a call\n if capstone.CS_GRP_CALL not in ins.groups:\n continue\n\n # return call if we don't search for a symbol\n if not symbol_regex:\n return ins\n\n # return call if we match target address\n if ins.target_const and re.match(\"%s$\" % symbol_regex, hex(ins.target)):\n return ins\n\n # return call if we match symbol name\n if ins.symbol and re.match(\"%s$\" % symbol_regex, ins.symbol):\n return ins\n\n\ndef break_next_ret(address=None):\n while pwndbg.gdblib.proc.alive:\n ins = break_next_branch(address)\n\n if not ins:\n break\n\n if capstone.CS_GRP_RET in ins.groups:\n return ins\n\n\ndef break_on_program_code():\n \"\"\"\n Breaks on next instruction that belongs to process' objfile code.\n :return: True for success, False when process ended or when pc is at the code.\n \"\"\"\n exe = 
pwndbg.gdblib.proc.exe\n binary_exec_page_ranges = [\n (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute\n ]\n\n pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n if start <= pc < end:\n print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n return False\n\n while pwndbg.gdblib.proc.alive:\n gdb.execute(\"si\", from_tty=False, to_string=False)\n\n pc = pwndbg.gdblib.regs.pc\n for start, end in binary_exec_page_ranges:\n if start <= pc < end:\n return True\n\n return False\n\n\ndef break_on_next(address=None):\n address = address or pwndbg.gdblib.regs.pc\n ins = pwndbg.disasm.one(address)\n\n gdb.Breakpoint(\"*%#x\" % (ins.address + ins.size), temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n", "path": "pwndbg/gdblib/next.py"}]}
| 3,250 | 859 |
gh_patches_debug_8333 | rasdani/github-patches | git_diff | scverse__scanpy-771 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sc.tl.umap error with init_pos="paga"
Hello,
Trying to use sc.tl.umap with initial positions from sc.tl.paga. Seems an error with UMAP from the error log. But it is only called when calling paga positions, UMAP works otherwise.
I do see https://github.com/theislab/scanpy/issues/666, and https://github.com/lmcinnes/umap/pull/262, but I am already running scanpy 1.4.4 and umap 0.3.9 so I don't understand how to solve the issue?
Attached error. Any suggestions?
Thanks!
<img width="575" alt="Screen Shot 2019-08-05 at 19 02 18" src="https://user-images.githubusercontent.com/20108378/62485139-c560df80-b7b3-11e9-8333-7e511c263a79.png">
</issue>
<code>
[start of scanpy/tools/_umap.py]
1 import numpy as np
2 from pandas.api.types import is_numeric_dtype
3 from sklearn.utils import check_random_state, check_array
4
5 from ._utils import get_init_pos_from_paga, choose_representation
6 from .._settings import settings
7 from .. import logging as logg
8
9
10 def umap(
11 adata,
12 min_dist=0.5,
13 spread=1.0,
14 n_components=2,
15 maxiter=None,
16 alpha=1.0,
17 gamma=1.0,
18 negative_sample_rate=5,
19 init_pos='spectral',
20 random_state=0,
21 a=None,
22 b=None,
23 copy=False,
24 ):
25 """Embed the neighborhood graph using UMAP [McInnes18]_.
26
27 UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
28 technique suitable for visualizing high-dimensional data. Besides tending to
29 be faster than tSNE, it optimizes the embedding such that it best reflects
30 the topology of the data, which we represent throughout Scanpy using a
31 neighborhood graph. tSNE, by contrast, optimizes the distribution of
32 nearest-neighbor distances in the embedding such that these best match the
33 distribution of distances in the high-dimensional space. We use the
34 implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
35 [McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
36 <https://doi.org/10.1101/298430>`__.
37
38 Parameters
39 ----------
40 adata : :class:`~anndata.AnnData`
41 Annotated data matrix.
42 min_dist : `float`, optional (default: 0.5)
43 The effective minimum distance between embedded points. Smaller values
44 will result in a more clustered/clumped embedding where nearby points on
45 the manifold are drawn closer together, while larger values will result
46 on a more even dispersal of points. The value should be set relative to
47 the ``spread`` value, which determines the scale at which embedded
48 points will be spread out. The default of in the `umap-learn` package is
49 0.1.
50 spread : `float` (optional, default 1.0)
51 The effective scale of embedded points. In combination with `min_dist`
52 this determines how clustered/clumped the embedded points are.
53 n_components : `int`, optional (default: 2)
54 The number of dimensions of the embedding.
55 maxiter : `int`, optional (default: `None`)
56 The number of iterations (epochs) of the optimization. Called `n_epochs`
57 in the original UMAP.
58 alpha : `float`, optional (default: 1.0)
59 The initial learning rate for the embedding optimization.
60 gamma : `float` (optional, default 1.0)
61 Weighting applied to negative samples in low dimensional embedding
62 optimization. Values higher than one will result in greater weight
63 being given to negative samples.
64 negative_sample_rate : `int` (optional, default 5)
65 The number of negative edge/1-simplex samples to use per positive
66 edge/1-simplex sample in optimizing the low dimensional embedding.
67 init_pos : `string` or `np.array`, optional (default: 'spectral')
68 How to initialize the low dimensional embedding. Called `init` in the
69 original UMAP.
70 Options are:
71
72 * Any key for `adata.obsm`.
73 * 'paga': positions from :func:`~scanpy.api.pl.paga`.
74 * 'spectral': use a spectral embedding of the graph.
75 * 'random': assign initial embedding positions at random.
76 * A numpy array of initial embedding positions.
77 random_state : `int`, `RandomState` or `None`, optional (default: 0)
78 If `int`, `random_state` is the seed used by the random number generator;
79 If `RandomState`, `random_state` is the random number generator;
80 If `None`, the random number generator is the `RandomState` instance used
81 by `np.random`.
82 a : `float` (optional, default `None`)
83 More specific parameters controlling the embedding. If `None` these
84 values are set automatically as determined by `min_dist` and
85 `spread`.
86 b : `float` (optional, default `None`)
87 More specific parameters controlling the embedding. If `None` these
88 values are set automatically as determined by `min_dist` and
89 `spread`.
90 copy : `bool` (default: `False`)
91 Return a copy instead of writing to adata.
92
93 Returns
94 -------
95 Depending on `copy`, returns or updates `adata` with the following fields.
96
97 **X_umap** : `adata.obsm` field
98 UMAP coordinates of data.
99 """
100 adata = adata.copy() if copy else adata
101 if 'neighbors' not in adata.uns:
102 raise ValueError(
103 'Did not find \'neighbors/connectivities\'. Run `sc.pp.neighbors` first.')
104 start = logg.info('computing UMAP')
105 if ('params' not in adata.uns['neighbors']
106 or adata.uns['neighbors']['params']['method'] != 'umap'):
107 logg.warning('neighbors/connectivities have not been computed using umap')
108 from umap.umap_ import find_ab_params, simplicial_set_embedding
109 if a is None or b is None:
110 a, b = find_ab_params(spread, min_dist)
111 else:
112 a = a
113 b = b
114
115 if isinstance(init_pos, str) and init_pos in adata.obsm.keys():
116 init_coords = adata.obsm[init_pos]
117 elif isinstance(init_pos, str) and init_pos == 'paga':
118 init_coords = get_init_pos_from_paga(adata, random_state=random_state)
119 else:
120 init_coords = init_pos # Let umap handle it
121 if hasattr(init_coords, "dtype") and is_numeric_dtype(init_pos):
122 init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
123
124 random_state = check_random_state(random_state)
125 n_epochs = 0 if maxiter is None else maxiter
126 neigh_params = adata.uns['neighbors']['params']
127 X = choose_representation(
128 adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)
129 # the data matrix X is really only used for determining the number of connected components
130 # for the init condition in the UMAP embedding
131 X_umap = simplicial_set_embedding(
132 X,
133 adata.uns['neighbors']['connectivities'].tocoo(),
134 n_components,
135 alpha,
136 a,
137 b,
138 gamma,
139 negative_sample_rate,
140 n_epochs,
141 init_coords,
142 random_state,
143 neigh_params.get('metric', 'euclidean'),
144 neigh_params.get('metric_kwds', {}),
145 verbose=settings.verbosity > 3,
146 )
147 adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates
148 logg.info(
149 ' finished',
150 time=start,
151 deep=(
152 'added\n'
153 " 'X_umap', UMAP coordinates (adata.obsm)"
154 ),
155 )
156 return adata if copy else None
157
[end of scanpy/tools/_umap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scanpy/tools/_umap.py b/scanpy/tools/_umap.py
--- a/scanpy/tools/_umap.py
+++ b/scanpy/tools/_umap.py
@@ -118,7 +118,7 @@
init_coords = get_init_pos_from_paga(adata, random_state=random_state)
else:
init_coords = init_pos # Let umap handle it
- if hasattr(init_coords, "dtype") and is_numeric_dtype(init_pos):
+ if hasattr(init_coords, "dtype"):
init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)
random_state = check_random_state(random_state)
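A short, hedged reading of why the removed guard misfired: `init_pos` is the string `'paga'`, so `is_numeric_dtype(init_pos)` is False even though the resolved coordinates are numeric, and the float32 conversion was skipped. The snippet below only illustrates that behaviour; the array is a placeholder for the coordinates returned by `get_init_pos_from_paga`.
```python
# Illustration of the guard the patch removes; the array is a placeholder.
import numpy as np
from pandas.api.types import is_numeric_dtype

init_pos = 'paga'
init_coords = np.zeros((10, 2), dtype=np.float64)

print(is_numeric_dtype(init_pos))     # False: the *string* is tested, so the
                                      # check_array(float32) branch never ran
print(hasattr(init_coords, 'dtype'))  # True: the simplified condition in the patch
```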
|
{"golden_diff": "diff --git a/scanpy/tools/_umap.py b/scanpy/tools/_umap.py\n--- a/scanpy/tools/_umap.py\n+++ b/scanpy/tools/_umap.py\n@@ -118,7 +118,7 @@\n init_coords = get_init_pos_from_paga(adata, random_state=random_state)\n else:\n init_coords = init_pos # Let umap handle it\n- if hasattr(init_coords, \"dtype\") and is_numeric_dtype(init_pos):\n+ if hasattr(init_coords, \"dtype\"):\n init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)\n \n random_state = check_random_state(random_state)\n", "issue": "sc.tl.umap error with init_pos=\"paga\"\nHello,\r\n\r\nTrying to use sc.tl.umap with initial positions from sc.tl.paga. Seems an error with UMAP from the error log. But it is only called when calling paga positions, UMAP works otherwise.\r\n\r\nI do see https://github.com/theislab/scanpy/issues/666, and https://github.com/lmcinnes/umap/pull/262, but I am already running scanpy 1.4.4 and umap 0.3.9 so I don't understand how to solve the issue?\r\n\r\nAttached error. Any suggestions? \r\nThanks!\r\n\r\n<img width=\"575\" alt=\"Screen Shot 2019-08-05 at 19 02 18\" src=\"https://user-images.githubusercontent.com/20108378/62485139-c560df80-b7b3-11e9-8333-7e511c263a79.png\">\r\n\n", "before_files": [{"content": "import numpy as np\nfrom pandas.api.types import is_numeric_dtype\nfrom sklearn.utils import check_random_state, check_array\n\nfrom ._utils import get_init_pos_from_paga, choose_representation\nfrom .._settings import settings\nfrom .. import logging as logg\n\n\ndef umap(\n adata,\n min_dist=0.5,\n spread=1.0,\n n_components=2,\n maxiter=None,\n alpha=1.0,\n gamma=1.0,\n negative_sample_rate=5,\n init_pos='spectral',\n random_state=0,\n a=None,\n b=None,\n copy=False,\n):\n \"\"\"Embed the neighborhood graph using UMAP [McInnes18]_.\n\n UMAP (Uniform Manifold Approximation and Projection) is a manifold learning\n technique suitable for visualizing high-dimensional data. Besides tending to\n be faster than tSNE, it optimizes the embedding such that it best reflects\n the topology of the data, which we represent throughout Scanpy using a\n neighborhood graph. tSNE, by contrast, optimizes the distribution of\n nearest-neighbor distances in the embedding such that these best match the\n distribution of distances in the high-dimensional space. We use the\n implementation of `umap-learn <https://github.com/lmcinnes/umap>`__\n [McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint\n <https://doi.org/10.1101/298430>`__.\n\n Parameters\n ----------\n adata : :class:`~anndata.AnnData`\n Annotated data matrix.\n min_dist : `float`, optional (default: 0.5)\n The effective minimum distance between embedded points. Smaller values\n will result in a more clustered/clumped embedding where nearby points on\n the manifold are drawn closer together, while larger values will result\n on a more even dispersal of points. The value should be set relative to\n the ``spread`` value, which determines the scale at which embedded\n points will be spread out. The default of in the `umap-learn` package is\n 0.1.\n spread : `float` (optional, default 1.0)\n The effective scale of embedded points. In combination with `min_dist`\n this determines how clustered/clumped the embedded points are.\n n_components : `int`, optional (default: 2)\n The number of dimensions of the embedding.\n maxiter : `int`, optional (default: `None`)\n The number of iterations (epochs) of the optimization. 
Called `n_epochs`\n in the original UMAP.\n alpha : `float`, optional (default: 1.0)\n The initial learning rate for the embedding optimization.\n gamma : `float` (optional, default 1.0)\n Weighting applied to negative samples in low dimensional embedding\n optimization. Values higher than one will result in greater weight\n being given to negative samples.\n negative_sample_rate : `int` (optional, default 5)\n The number of negative edge/1-simplex samples to use per positive\n edge/1-simplex sample in optimizing the low dimensional embedding.\n init_pos : `string` or `np.array`, optional (default: 'spectral')\n How to initialize the low dimensional embedding. Called `init` in the\n original UMAP.\n Options are:\n\n * Any key for `adata.obsm`.\n * 'paga': positions from :func:`~scanpy.api.pl.paga`.\n * 'spectral': use a spectral embedding of the graph.\n * 'random': assign initial embedding positions at random.\n * A numpy array of initial embedding positions.\n random_state : `int`, `RandomState` or `None`, optional (default: 0)\n If `int`, `random_state` is the seed used by the random number generator;\n If `RandomState`, `random_state` is the random number generator;\n If `None`, the random number generator is the `RandomState` instance used\n by `np.random`.\n a : `float` (optional, default `None`)\n More specific parameters controlling the embedding. If `None` these\n values are set automatically as determined by `min_dist` and\n `spread`.\n b : `float` (optional, default `None`)\n More specific parameters controlling the embedding. If `None` these\n values are set automatically as determined by `min_dist` and\n `spread`.\n copy : `bool` (default: `False`)\n Return a copy instead of writing to adata.\n\n Returns\n -------\n Depending on `copy`, returns or updates `adata` with the following fields.\n\n **X_umap** : `adata.obsm` field\n UMAP coordinates of data.\n \"\"\"\n adata = adata.copy() if copy else adata\n if 'neighbors' not in adata.uns:\n raise ValueError(\n 'Did not find \\'neighbors/connectivities\\'. 
Run `sc.pp.neighbors` first.')\n start = logg.info('computing UMAP')\n if ('params' not in adata.uns['neighbors']\n or adata.uns['neighbors']['params']['method'] != 'umap'):\n logg.warning('neighbors/connectivities have not been computed using umap')\n from umap.umap_ import find_ab_params, simplicial_set_embedding\n if a is None or b is None:\n a, b = find_ab_params(spread, min_dist)\n else:\n a = a\n b = b\n\n if isinstance(init_pos, str) and init_pos in adata.obsm.keys():\n init_coords = adata.obsm[init_pos]\n elif isinstance(init_pos, str) and init_pos == 'paga':\n init_coords = get_init_pos_from_paga(adata, random_state=random_state)\n else:\n init_coords = init_pos # Let umap handle it\n if hasattr(init_coords, \"dtype\") and is_numeric_dtype(init_pos):\n init_coords = check_array(init_coords, dtype=np.float32, accept_sparse=False)\n\n random_state = check_random_state(random_state)\n n_epochs = 0 if maxiter is None else maxiter\n neigh_params = adata.uns['neighbors']['params']\n X = choose_representation(\n adata, neigh_params.get('use_rep', None), neigh_params.get('n_pcs', None), silent=True)\n # the data matrix X is really only used for determining the number of connected components\n # for the init condition in the UMAP embedding\n X_umap = simplicial_set_embedding(\n X,\n adata.uns['neighbors']['connectivities'].tocoo(),\n n_components,\n alpha,\n a,\n b,\n gamma,\n negative_sample_rate,\n n_epochs,\n init_coords,\n random_state,\n neigh_params.get('metric', 'euclidean'),\n neigh_params.get('metric_kwds', {}),\n verbose=settings.verbosity > 3,\n )\n adata.obsm['X_umap'] = X_umap # annotate samples with UMAP coordinates\n logg.info(\n ' finished',\n time=start,\n deep=(\n 'added\\n'\n \" 'X_umap', UMAP coordinates (adata.obsm)\"\n ),\n )\n return adata if copy else None\n", "path": "scanpy/tools/_umap.py"}]}
| 2,748 | 150 |
gh_patches_debug_7196
|
rasdani/github-patches
|
git_diff
|
spyder-ide__spyder-16020
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Splash screen can take a lot of space in low resolution screens
When starting Spyder on my laptop screen (1366 x 768), the splash screen looks like this (quite big):

However, on my secondary screen (1920×1080) the splash looks better:

</issue>
<code>
[start of spyder/app/utils.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """Utility functions for the Spyder application."""
8
9 # Standard library imports
10 import glob
11 import logging
12 import os
13 import os.path as osp
14 import re
15 import sys
16
17 # Third-party imports
18 import psutil
19 from qtpy.QtCore import QCoreApplication, Qt
20 from qtpy.QtGui import QColor, QPalette, QPixmap
21 from qtpy.QtWidgets import QSplashScreen
22
23 # Local imports
24 from spyder.config.base import (DEV, get_conf_path, get_debug_level,
25 running_under_pytest)
26 from spyder.utils.image_path_manager import get_image_path
27 from spyder.utils.qthelpers import file_uri
28 from spyder.utils.external.dafsa.dafsa import DAFSA
29 from spyder.utils.stylesheet import QStylePalette
30
31 # For spyder-ide/spyder#7447.
32 try:
33 from qtpy.QtQuick import QQuickWindow, QSGRendererInterface
34 except Exception:
35 QQuickWindow = QSGRendererInterface = None
36
37
38 root_logger = logging.getLogger()
39 FILTER_NAMES = os.environ.get('SPYDER_FILTER_LOG', "").split(',')
40 FILTER_NAMES = [f.strip() for f in FILTER_NAMES]
41
42
43 class Spy:
44 """
45 This is used to inject a 'spy' object in the internal console
46 namespace to inspect Spyder internals.
47
48 Attributes:
49 app Reference to main QApplication object
50 window Reference to spyder.MainWindow widget
51 """
52 def __init__(self, app, window):
53 self.app = app
54 self.window = window
55
56 def __dir__(self):
57 return (list(self.__dict__.keys()) +
58 [x for x in dir(self.__class__) if x[0] != '_'])
59
60
61 def get_python_doc_path():
62 """
63 Return Python documentation path
64 (Windows: return the PythonXX.chm path if available)
65 """
66 if os.name == 'nt':
67 doc_path = osp.join(sys.prefix, "Doc")
68 if not osp.isdir(doc_path):
69 return
70 python_chm = [path for path in os.listdir(doc_path)
71 if re.match(r"(?i)Python[0-9]{3,6}.chm", path)]
72 if python_chm:
73 return file_uri(osp.join(doc_path, python_chm[0]))
74 else:
75 vinf = sys.version_info
76 doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
77 python_doc = osp.join(doc_path, "index.html")
78 if osp.isfile(python_doc):
79 return file_uri(python_doc)
80
81
82 def set_opengl_implementation(option):
83 """
84 Set the OpenGL implementation used by Spyder.
85
86 See spyder-ide/spyder#7447 for the details.
87 """
88 if option == 'software':
89 QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
90 if QQuickWindow is not None:
91 QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)
92 elif option == 'desktop':
93 QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
94 if QQuickWindow is not None:
95 QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
96 elif option == 'gles':
97 QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)
98 if QQuickWindow is not None:
99 QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)
100
101
102 def setup_logging(cli_options):
103 """Setup logging with cli options defined by the user."""
104 if cli_options.debug_info or get_debug_level() > 0:
105 levels = {2: logging.INFO, 3: logging.DEBUG}
106 log_level = levels[get_debug_level()]
107 log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'
108
109 console_filters = cli_options.filter_log.split(',')
110 console_filters = [x.strip() for x in console_filters]
111 console_filters = console_filters + FILTER_NAMES
112 console_filters = [x for x in console_filters if x != '']
113
114 handlers = [logging.StreamHandler()]
115 if cli_options.debug_output == 'file':
116 log_file = 'spyder-debug.log'
117 handlers.append(
118 logging.FileHandler(filename=log_file, mode='w+')
119 )
120 else:
121 log_file = None
122
123 match_func = lambda x: True
124 if console_filters != [''] and len(console_filters) > 0:
125 dafsa = DAFSA(console_filters)
126 match_func = lambda x: (dafsa.lookup(x, stop_on_prefix=True)
127 is not None)
128
129 formatter = logging.Formatter(log_format)
130
131 class ModuleFilter(logging.Filter):
132 """Filter messages based on module name prefix."""
133
134 def filter(self, record):
135 return match_func(record.name)
136
137 filter = ModuleFilter()
138 root_logger.setLevel(log_level)
139 for handler in handlers:
140 handler.addFilter(filter)
141 handler.setFormatter(formatter)
142 handler.setLevel(log_level)
143 root_logger.addHandler(handler)
144
145
146 def delete_lsp_log_files():
147 """Delete previous dead Spyder instances LSP log files."""
148 regex = re.compile(r'.*_.*_(\d+)[.]log')
149 files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
150 for f in files:
151 match = regex.match(f)
152 if match is not None:
153 pid = int(match.group(1))
154 if not psutil.pid_exists(pid):
155 os.remove(f)
156
157
158 def qt_message_handler(msg_type, msg_log_context, msg_string):
159 """
160 Qt warning messages are intercepted by this handler.
161
162 On some operating systems, warning messages might be displayed
163 even if the actual message does not apply. This filter adds a
164 blacklist for messages that are being printed for no apparent
165 reason. Anything else will get printed in the internal console.
166
167 In DEV mode, all messages are printed.
168 """
169 BLACKLIST = [
170 'QMainWidget::resizeDocks: all sizes need to be larger than 0',
171 ]
172 if DEV or msg_string not in BLACKLIST:
173 print(msg_string) # spyder: test-skip
174
175
176 def create_splash_screen():
177 """Create splash screen."""
178 if not running_under_pytest():
179 splash = QSplashScreen(QPixmap(get_image_path('splash')))
180 splash_font = splash.font()
181 splash_font.setPixelSize(14)
182 splash.setFont(splash_font)
183 else:
184 splash = None
185
186 return splash
187
188
189 def set_links_color(app):
190 """
191 Fix color for links.
192
193 This was taken from QDarkstyle, which is MIT licensed.
194 """
195 color = QStylePalette.COLOR_ACCENT_3
196 qcolor = QColor(color)
197
198 app_palette = app.palette()
199 app_palette.setColor(QPalette.Normal, QPalette.Link, qcolor)
200 app.setPalette(app_palette)
201
[end of spyder/app/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spyder/app/utils.py b/spyder/app/utils.py
--- a/spyder/app/utils.py
+++ b/spyder/app/utils.py
@@ -176,7 +176,9 @@
def create_splash_screen():
"""Create splash screen."""
if not running_under_pytest():
- splash = QSplashScreen(QPixmap(get_image_path('splash')))
+ pixmap = QPixmap(get_image_path('splash'))
+ splash = QSplashScreen(
+ pixmap.scaledToWidth(500, Qt.SmoothTransformation))
splash_font = splash.font()
splash_font.setPixelSize(14)
splash.setFont(splash_font)
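As a standalone illustration of the approach taken in the patch (capping the splash width instead of showing the pixmap at native size), a minimal sketch follows; the image path is a placeholder rather than Spyder's real resource.
```python
# Minimal sketch of a width-capped splash screen; 'splash.png' is a placeholder.
import sys

from qtpy.QtCore import Qt
from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import QApplication, QSplashScreen

app = QApplication(sys.argv)
pixmap = QPixmap('splash.png')
splash = QSplashScreen(pixmap.scaledToWidth(500, Qt.SmoothTransformation))
splash.show()
app.processEvents()
```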
|
{"golden_diff": "diff --git a/spyder/app/utils.py b/spyder/app/utils.py\n--- a/spyder/app/utils.py\n+++ b/spyder/app/utils.py\n@@ -176,7 +176,9 @@\n def create_splash_screen():\n \"\"\"Create splash screen.\"\"\"\n if not running_under_pytest():\n- splash = QSplashScreen(QPixmap(get_image_path('splash')))\n+ pixmap = QPixmap(get_image_path('splash'))\n+ splash = QSplashScreen(\n+ pixmap.scaledToWidth(500, Qt.SmoothTransformation))\n splash_font = splash.font()\n splash_font.setPixelSize(14)\n splash.setFont(splash_font)\n", "issue": "Splash screen can take a lot of space in low resolution screens\nWhen starting Spyder in my laptop screen (1366 x 768) the splash looks like this (kind of big):\r\n\r\n\r\n\r\n\r\nHowever in my secondary screen (1920\u00d71080) the splash looks better:\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Utility functions for the Spyder application.\"\"\"\n\n# Standard library imports\nimport glob\nimport logging\nimport os\nimport os.path as osp\nimport re\nimport sys\n\n# Third-party imports\nimport psutil\nfrom qtpy.QtCore import QCoreApplication, Qt\nfrom qtpy.QtGui import QColor, QPalette, QPixmap\nfrom qtpy.QtWidgets import QSplashScreen\n\n# Local imports\nfrom spyder.config.base import (DEV, get_conf_path, get_debug_level,\n running_under_pytest)\nfrom spyder.utils.image_path_manager import get_image_path\nfrom spyder.utils.qthelpers import file_uri\nfrom spyder.utils.external.dafsa.dafsa import DAFSA\nfrom spyder.utils.stylesheet import QStylePalette\n\n# For spyder-ide/spyder#7447.\ntry:\n from qtpy.QtQuick import QQuickWindow, QSGRendererInterface\nexcept Exception:\n QQuickWindow = QSGRendererInterface = None\n\n\nroot_logger = logging.getLogger()\nFILTER_NAMES = os.environ.get('SPYDER_FILTER_LOG', \"\").split(',')\nFILTER_NAMES = [f.strip() for f in FILTER_NAMES]\n\n\nclass Spy:\n \"\"\"\n This is used to inject a 'spy' object in the internal console\n namespace to inspect Spyder internals.\n\n Attributes:\n app Reference to main QApplication object\n window Reference to spyder.MainWindow widget\n \"\"\"\n def __init__(self, app, window):\n self.app = app\n self.window = window\n\n def __dir__(self):\n return (list(self.__dict__.keys()) +\n [x for x in dir(self.__class__) if x[0] != '_'])\n\n\ndef get_python_doc_path():\n \"\"\"\n Return Python documentation path\n (Windows: return the PythonXX.chm path if available)\n \"\"\"\n if os.name == 'nt':\n doc_path = osp.join(sys.prefix, \"Doc\")\n if not osp.isdir(doc_path):\n return\n python_chm = [path for path in os.listdir(doc_path)\n if re.match(r\"(?i)Python[0-9]{3,6}.chm\", path)]\n if python_chm:\n return file_uri(osp.join(doc_path, python_chm[0]))\n else:\n vinf = sys.version_info\n doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])\n python_doc = osp.join(doc_path, \"index.html\")\n if osp.isfile(python_doc):\n return file_uri(python_doc)\n\n\ndef set_opengl_implementation(option):\n \"\"\"\n Set the OpenGL implementation used by Spyder.\n\n See spyder-ide/spyder#7447 for the details.\n \"\"\"\n if option == 'software':\n QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)\n if QQuickWindow is not None:\n QQuickWindow.setSceneGraphBackend(QSGRendererInterface.Software)\n elif option == 'desktop':\n QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)\n if QQuickWindow is not None:\n 
QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)\n elif option == 'gles':\n QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)\n if QQuickWindow is not None:\n QQuickWindow.setSceneGraphBackend(QSGRendererInterface.OpenGL)\n\n\ndef setup_logging(cli_options):\n \"\"\"Setup logging with cli options defined by the user.\"\"\"\n if cli_options.debug_info or get_debug_level() > 0:\n levels = {2: logging.INFO, 3: logging.DEBUG}\n log_level = levels[get_debug_level()]\n log_format = '%(asctime)s [%(levelname)s] [%(name)s] -> %(message)s'\n\n console_filters = cli_options.filter_log.split(',')\n console_filters = [x.strip() for x in console_filters]\n console_filters = console_filters + FILTER_NAMES\n console_filters = [x for x in console_filters if x != '']\n\n handlers = [logging.StreamHandler()]\n if cli_options.debug_output == 'file':\n log_file = 'spyder-debug.log'\n handlers.append(\n logging.FileHandler(filename=log_file, mode='w+')\n )\n else:\n log_file = None\n\n match_func = lambda x: True\n if console_filters != [''] and len(console_filters) > 0:\n dafsa = DAFSA(console_filters)\n match_func = lambda x: (dafsa.lookup(x, stop_on_prefix=True)\n is not None)\n\n formatter = logging.Formatter(log_format)\n\n class ModuleFilter(logging.Filter):\n \"\"\"Filter messages based on module name prefix.\"\"\"\n\n def filter(self, record):\n return match_func(record.name)\n\n filter = ModuleFilter()\n root_logger.setLevel(log_level)\n for handler in handlers:\n handler.addFilter(filter)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n root_logger.addHandler(handler)\n\n\ndef delete_lsp_log_files():\n \"\"\"Delete previous dead Spyder instances LSP log files.\"\"\"\n regex = re.compile(r'.*_.*_(\\d+)[.]log')\n files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))\n for f in files:\n match = regex.match(f)\n if match is not None:\n pid = int(match.group(1))\n if not psutil.pid_exists(pid):\n os.remove(f)\n\n\ndef qt_message_handler(msg_type, msg_log_context, msg_string):\n \"\"\"\n Qt warning messages are intercepted by this handler.\n\n On some operating systems, warning messages might be displayed\n even if the actual message does not apply. This filter adds a\n blacklist for messages that are being printed for no apparent\n reason. Anything else will get printed in the internal console.\n\n In DEV mode, all messages are printed.\n \"\"\"\n BLACKLIST = [\n 'QMainWidget::resizeDocks: all sizes need to be larger than 0',\n ]\n if DEV or msg_string not in BLACKLIST:\n print(msg_string) # spyder: test-skip\n\n\ndef create_splash_screen():\n \"\"\"Create splash screen.\"\"\"\n if not running_under_pytest():\n splash = QSplashScreen(QPixmap(get_image_path('splash')))\n splash_font = splash.font()\n splash_font.setPixelSize(14)\n splash.setFont(splash_font)\n else:\n splash = None\n\n return splash\n\n\ndef set_links_color(app):\n \"\"\"\n Fix color for links.\n\n This was taken from QDarkstyle, which is MIT licensed.\n \"\"\"\n color = QStylePalette.COLOR_ACCENT_3\n qcolor = QColor(color)\n\n app_palette = app.palette()\n app_palette.setColor(QPalette.Normal, QPalette.Link, qcolor)\n app.setPalette(app_palette)\n", "path": "spyder/app/utils.py"}]}
| 2,724 | 145 |
gh_patches_debug_32530
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2744
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
new strawberry.lazy not compatible with from __future__ import annotations
# Description
The new way of lazy referencing via strawberry.lazy is not compatible with
`from __future__ import annotations`.
It would be nice to have compatibility, since `typing.Self` is not available for Python <= 3.10 (the main reason to use `from __future__ import annotations`).
</issue>
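To make the failure mode concrete, a hypothetical reproduction of the pattern is sketched below; `TypeA`, `TypeB`, and the companion module `other_types` are invented names, and the sketch assumes a sibling `other_types.py` defining a strawberry type called `TypeB`.
```python
# Hypothetical repro of the pattern the issue describes; assumes a companion
# module other_types.py that defines a strawberry type named TypeB.
from __future__ import annotations

from typing import TYPE_CHECKING

import strawberry
from typing_extensions import Annotated

if TYPE_CHECKING:
    from other_types import TypeB


@strawberry.type
class TypeA:
    # With future annotations the whole annotation is stored as a string
    # (a ForwardRef), which is what used to break lazy resolution.
    b: Annotated["TypeB", strawberry.lazy("other_types")]
```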
<code>
[start of strawberry/utils/typing.py]
1 import ast
2 import sys
3 import typing
4 from collections.abc import AsyncGenerator
5 from functools import lru_cache
6 from typing import ( # type: ignore
7 TYPE_CHECKING,
8 Any,
9 Callable,
10 ClassVar,
11 Dict,
12 ForwardRef,
13 Generic,
14 Optional,
15 Tuple,
16 Type,
17 TypeVar,
18 Union,
19 _eval_type,
20 _GenericAlias,
21 _SpecialForm,
22 cast,
23 overload,
24 )
25 from typing_extensions import Annotated, get_args, get_origin
26
27 ast_unparse = getattr(ast, "unparse", None)
28 # ast.unparse is only available on python 3.9+. For older versions we will
29 # use `astunparse.unparse`.
30 # We are also using "not TYPE_CHECKING" here because mypy gives an erorr
31 # on tests because "astunparse" is missing stubs, but the mypy action says
32 # that the comment is unused.
33 if not TYPE_CHECKING and ast_unparse is None:
34 import astunparse
35
36 ast_unparse = astunparse.unparse
37
38
39 @lru_cache()
40 def get_generic_alias(type_: Type) -> Type:
41 """Get the generic alias for a type.
42
43 Given a type, its generic alias from `typing` module will be returned
44 if it exists. For example:
45
46 >>> get_generic_alias(list)
47 typing.List
48 >>> get_generic_alias(dict)
49 typing.Dict
50
51 This is mostly useful for python versions prior to 3.9, to get a version
52 of a concrete type which supports `__class_getitem__`. In 3.9+ types like
53 `list`/`dict`/etc are subscriptable and can be used directly instead
54 of their generic alias version.
55 """
56 if isinstance(type_, _SpecialForm):
57 return type_
58
59 for attr_name in dir(typing):
60 # ignore private attributes, they are not Generic aliases
61 if attr_name.startswith("_"): # pragma: no cover
62 continue
63
64 attr = getattr(typing, attr_name)
65 # _GenericAlias overrides all the methods that we can use to know if
66 # this is a subclass of it. But if it has an "_inst" attribute
67 # then it for sure is a _GenericAlias
68 if hasattr(attr, "_inst") and attr.__origin__ is type_:
69 return attr
70
71 raise AssertionError(f"No GenericAlias available for {type_}") # pragma: no cover
72
73
74 def is_list(annotation: object) -> bool:
75 """Returns True if annotation is a List"""
76
77 annotation_origin = getattr(annotation, "__origin__", None)
78
79 return annotation_origin == list
80
81
82 def is_union(annotation: object) -> bool:
83 """Returns True if annotation is a Union"""
84
85 # this check is needed because unions declared with the new syntax `A | B`
86 # don't have a `__origin__` property on them, but they are instances of
87 # `UnionType`, which is only available in Python 3.10+
88 if sys.version_info >= (3, 10):
89 from types import UnionType
90
91 if isinstance(annotation, UnionType):
92 return True
93
94 # unions declared as Union[A, B] fall through to this check, even on python 3.10+
95
96 annotation_origin = getattr(annotation, "__origin__", None)
97
98 return annotation_origin == Union
99
100
101 def is_optional(annotation: Type) -> bool:
102 """Returns True if the annotation is Optional[SomeType]"""
103
104 # Optionals are represented as unions
105
106 if not is_union(annotation):
107 return False
108
109 types = annotation.__args__
110
111 # A Union to be optional needs to have at least one None type
112 return any(x == None.__class__ for x in types)
113
114
115 def get_optional_annotation(annotation: Type) -> Type:
116 types = annotation.__args__
117
118 non_none_types = tuple(x for x in types if x != None.__class__)
119
120 # if we have multiple non none types we want to return a copy of this
121 # type (normally a Union type).
122
123 if len(non_none_types) > 1:
124 return annotation.copy_with(non_none_types)
125
126 return non_none_types[0]
127
128
129 def get_list_annotation(annotation: Type) -> Type:
130 return annotation.__args__[0]
131
132
133 def is_concrete_generic(annotation: type) -> bool:
134 ignored_generics = (list, tuple, Union, ClassVar, AsyncGenerator)
135 return (
136 isinstance(annotation, _GenericAlias)
137 and annotation.__origin__ not in ignored_generics
138 )
139
140
141 def is_generic_subclass(annotation: type) -> bool:
142 return isinstance(annotation, type) and issubclass(
143 annotation, Generic # type:ignore
144 )
145
146
147 def is_generic(annotation: type) -> bool:
148 """Returns True if the annotation is or extends a generic."""
149
150 return (
151 # TODO: These two lines appear to have the same effect. When will an
152 # annotation have parameters but not satisfy the first condition?
153 (is_generic_subclass(annotation) or is_concrete_generic(annotation))
154 and bool(get_parameters(annotation))
155 )
156
157
158 def is_type_var(annotation: Type) -> bool:
159 """Returns True if the annotation is a TypeVar."""
160
161 return isinstance(annotation, TypeVar)
162
163
164 def get_parameters(annotation: Type) -> Union[Tuple[object], Tuple[()]]:
165 if (
166 isinstance(annotation, _GenericAlias)
167 or isinstance(annotation, type)
168 and issubclass(annotation, Generic) # type:ignore
169 and annotation is not Generic
170 ):
171 return annotation.__parameters__
172 else:
173 return () # pragma: no cover
174
175
176 @overload
177 def _ast_replace_union_operation(expr: ast.expr) -> ast.expr:
178 ...
179
180
181 @overload
182 def _ast_replace_union_operation(expr: ast.Expr) -> ast.Expr:
183 ...
184
185
186 def _ast_replace_union_operation(
187 expr: Union[ast.Expr, ast.expr]
188 ) -> Union[ast.Expr, ast.expr]:
189 if isinstance(expr, ast.Expr) and isinstance(
190 expr.value, (ast.BinOp, ast.Subscript)
191 ):
192 expr = ast.Expr(_ast_replace_union_operation(expr.value))
193 elif isinstance(expr, ast.BinOp):
194 left = _ast_replace_union_operation(expr.left)
195 right = _ast_replace_union_operation(expr.right)
196 expr = ast.Subscript(
197 ast.Name(id="Union"),
198 ast.Tuple([left, right], ast.Load()),
199 ast.Load(),
200 )
201 elif isinstance(expr, ast.Tuple):
202 expr = ast.Tuple(
203 [_ast_replace_union_operation(elt) for elt in expr.elts],
204 ast.Load(),
205 )
206 elif isinstance(expr, ast.Subscript):
207 if hasattr(ast, "Index") and isinstance(expr.slice, ast.Index):
208 expr = ast.Subscript(
209 expr.value,
210 # The cast is required for mypy on python 3.7 and 3.8
211 ast.Index(_ast_replace_union_operation(cast(Any, expr.slice).value)),
212 ast.Load(),
213 )
214 elif isinstance(expr.slice, (ast.BinOp, ast.Tuple)):
215 expr = ast.Subscript(
216 expr.value,
217 _ast_replace_union_operation(expr.slice),
218 ast.Load(),
219 )
220
221 return expr
222
223
224 def eval_type(
225 type_: Any,
226 globalns: Optional[Dict] = None,
227 localns: Optional[Dict] = None,
228 ) -> Type:
229 """Evaluates a type, resolving forward references."""
230 from strawberry.auto import StrawberryAuto
231 from strawberry.lazy_type import StrawberryLazyReference
232 from strawberry.private import StrawberryPrivate
233
234 globalns = globalns or {}
235 # If this is not a string, maybe its args are (e.g. List["Foo"])
236 if isinstance(type_, ForwardRef):
237 # For Python 3.10+, we can use the built-in _eval_type function directly.
238 # It will handle "|" notations properly
239 if sys.version_info < (3, 10):
240 parsed = _ast_replace_union_operation(
241 cast(ast.Expr, ast.parse(type_.__forward_arg__).body[0])
242 )
243
244 # We replaced "a | b" with "Union[a, b], so make sure Union can be resolved
245 # at globalns because it may not be there
246 if "Union" not in globalns:
247 globalns["Union"] = Union
248
249 assert ast_unparse
250 type_ = ForwardRef(ast_unparse(parsed))
251
252 return _eval_type(type_, globalns, localns)
253
254 origin = get_origin(type_)
255 if origin is not None:
256 args = get_args(type_)
257 if origin is Annotated:
258 for arg in args[1:]:
259 if isinstance(arg, StrawberryPrivate):
260 return type_
261
262 if isinstance(arg, StrawberryLazyReference):
263 remaining_args = [
264 a
265 for a in args[1:]
266 if not isinstance(arg, StrawberryLazyReference)
267 ]
268 args = (arg.resolve_forward_ref(args[0]), *remaining_args)
269 break
270 if isinstance(arg, StrawberryAuto):
271 remaining_args = [
272 a for a in args[1:] if not isinstance(arg, StrawberryAuto)
273 ]
274 args = (arg, *remaining_args)
275 break
276
277 # If we have only a StrawberryLazyReference and no more annotations,
278 # we need to return the argument directly because Annotated
279 # will raise an error if trying to instantiate it with only
280 # one argument.
281 if len(args) == 1:
282 return args[0]
283
284 # python 3.10 will return UnionType for origin, and it cannot be
285 # subscripted like Union[Foo, Bar]
286 if sys.version_info >= (3, 10):
287 from types import UnionType
288
289 if origin is UnionType:
290 origin = Union
291
292 # Future annotations in older versions will eval generic aliases to their
293 # real types (i.e. List[foo] will have its origin set to list instead
294 # of List). If that type is not subscriptable, retrieve its generic
295 # alias version instead.
296 if sys.version_info < (3, 9) and not hasattr(origin, "__class_getitem__"):
297 origin = get_generic_alias(origin)
298
299 type_ = (
300 origin[tuple(eval_type(a, globalns, localns) for a in args)]
301 if args
302 else origin
303 )
304
305 return type_
306
307
308 _T = TypeVar("_T")
309
310
311 def __dataclass_transform__(
312 *,
313 eq_default: bool = True,
314 order_default: bool = False,
315 kw_only_default: bool = False,
316 field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
317 ) -> Callable[[_T], _T]:
318 return lambda a: a
319
[end of strawberry/utils/typing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/strawberry/utils/typing.py b/strawberry/utils/typing.py
--- a/strawberry/utils/typing.py
+++ b/strawberry/utils/typing.py
@@ -1,4 +1,5 @@
import ast
+import re
import sys
import typing
from collections.abc import AsyncGenerator
@@ -221,6 +222,11 @@
return expr
+_annotated_re = re.compile(
+ r"(Annotated\[)(?P<type>\w*),(?P<args>.*)(\])",
+)
+
+
def eval_type(
type_: Any,
globalns: Optional[Dict] = None,
@@ -249,6 +255,37 @@
assert ast_unparse
type_ = ForwardRef(ast_unparse(parsed))
+ # When using forward refs, the whole
+ # Annotated[SomeType, strabwerry.lazy("type.module")] is a forward ref,
+ # and trying to _eval_type on it will fail. Take a different approach
+ # here to resolve lazy types by execing the annotated args and resolving
+ # the type directly.
+ annotated_match = _annotated_re.search(type_.__forward_arg__)
+ if annotated_match:
+ gdict = annotated_match.groupdict()
+ # FIXME: Eval the remaining annotated args to get their real values
+ # We might want to refactor how we import lazy modules to avoid having
+ # to eval the code in here
+ args = eval(f'({gdict["args"]}, )', globalns, localns) # noqa: PGH001
+ lazy_ref = next(
+ (arg for arg in args if isinstance(arg, StrawberryLazyReference)),
+ None,
+ )
+ if lazy_ref is not None:
+ remaining = [
+ a for a in args if not isinstance(a, StrawberryLazyReference)
+ ]
+ type_ = lazy_ref.resolve_forward_ref(ForwardRef(gdict["type"]))
+ # If we only had a StrawberryLazyReference, we can return the type
+ # directly. It already did its job!
+ if not remaining:
+ return type_
+
+ # Otherwise return the type annotated with the remaining annotations
+ return Annotated.__class_getitem__( # type: ignore
+ (type_, *remaining),
+ )
+
return _eval_type(type_, globalns, localns)
origin = get_origin(type_)
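A quick illustration of what the `_annotated_re` pattern added above captures when the whole annotation arrives as one forward-reference string; the type and module names here are made up.
```python
import re

_annotated_re = re.compile(r"(Annotated\[)(?P<type>\w*),(?P<args>.*)(\])")

forward_arg = 'Annotated[TypeB, strawberry.lazy("other_types")]'
match = _annotated_re.search(forward_arg)
print(match.groupdict())
# {'type': 'TypeB', 'args': ' strawberry.lazy("other_types")'}
```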
|
{"golden_diff": "diff --git a/strawberry/utils/typing.py b/strawberry/utils/typing.py\n--- a/strawberry/utils/typing.py\n+++ b/strawberry/utils/typing.py\n@@ -1,4 +1,5 @@\n import ast\n+import re\n import sys\n import typing\n from collections.abc import AsyncGenerator\n@@ -221,6 +222,11 @@\n return expr\n \n \n+_annotated_re = re.compile(\n+ r\"(Annotated\\[)(?P<type>\\w*),(?P<args>.*)(\\])\",\n+)\n+\n+\n def eval_type(\n type_: Any,\n globalns: Optional[Dict] = None,\n@@ -249,6 +255,37 @@\n assert ast_unparse\n type_ = ForwardRef(ast_unparse(parsed))\n \n+ # When using forward refs, the whole\n+ # Annotated[SomeType, strabwerry.lazy(\"type.module\")] is a forward ref,\n+ # and trying to _eval_type on it will fail. Take a different approach\n+ # here to resolve lazy types by execing the annotated args and resolving\n+ # the type directly.\n+ annotated_match = _annotated_re.search(type_.__forward_arg__)\n+ if annotated_match:\n+ gdict = annotated_match.groupdict()\n+ # FIXME: Eval the remaining annotated args to get their real values\n+ # We might want to refactor how we import lazy modules to avoid having\n+ # to eval the code in here\n+ args = eval(f'({gdict[\"args\"]}, )', globalns, localns) # noqa: PGH001\n+ lazy_ref = next(\n+ (arg for arg in args if isinstance(arg, StrawberryLazyReference)),\n+ None,\n+ )\n+ if lazy_ref is not None:\n+ remaining = [\n+ a for a in args if not isinstance(a, StrawberryLazyReference)\n+ ]\n+ type_ = lazy_ref.resolve_forward_ref(ForwardRef(gdict[\"type\"]))\n+ # If we only had a StrawberryLazyReference, we can return the type\n+ # directly. It already did its job!\n+ if not remaining:\n+ return type_\n+\n+ # Otherwise return the type annotated with the remaining annotations\n+ return Annotated.__class_getitem__( # type: ignore\n+ (type_, *remaining),\n+ )\n+\n return _eval_type(type_, globalns, localns)\n \n origin = get_origin(type_)\n", "issue": "new strawberry.lazy not compatible with from __future__ import annotations\n# Description\r\n\r\nThe new way of lazy referencing via strawberry.lazy is not compatible with \r\n`from __future__ import annotations`\r\n\r\nIt would be nice to have compatibility as typing.Self is not available for python <=3.10 (the main reason to have `from __future__ import annotations`)\n", "before_files": [{"content": "import ast\nimport sys\nimport typing\nfrom collections.abc import AsyncGenerator\nfrom functools import lru_cache\nfrom typing import ( # type: ignore\n TYPE_CHECKING,\n Any,\n Callable,\n ClassVar,\n Dict,\n ForwardRef,\n Generic,\n Optional,\n Tuple,\n Type,\n TypeVar,\n Union,\n _eval_type,\n _GenericAlias,\n _SpecialForm,\n cast,\n overload,\n)\nfrom typing_extensions import Annotated, get_args, get_origin\n\nast_unparse = getattr(ast, \"unparse\", None)\n# ast.unparse is only available on python 3.9+. For older versions we will\n# use `astunparse.unparse`.\n# We are also using \"not TYPE_CHECKING\" here because mypy gives an erorr\n# on tests because \"astunparse\" is missing stubs, but the mypy action says\n# that the comment is unused.\nif not TYPE_CHECKING and ast_unparse is None:\n import astunparse\n\n ast_unparse = astunparse.unparse\n\n\n@lru_cache()\ndef get_generic_alias(type_: Type) -> Type:\n \"\"\"Get the generic alias for a type.\n\n Given a type, its generic alias from `typing` module will be returned\n if it exists. 
For example:\n\n >>> get_generic_alias(list)\n typing.List\n >>> get_generic_alias(dict)\n typing.Dict\n\n This is mostly useful for python versions prior to 3.9, to get a version\n of a concrete type which supports `__class_getitem__`. In 3.9+ types like\n `list`/`dict`/etc are subscriptable and can be used directly instead\n of their generic alias version.\n \"\"\"\n if isinstance(type_, _SpecialForm):\n return type_\n\n for attr_name in dir(typing):\n # ignore private attributes, they are not Generic aliases\n if attr_name.startswith(\"_\"): # pragma: no cover\n continue\n\n attr = getattr(typing, attr_name)\n # _GenericAlias overrides all the methods that we can use to know if\n # this is a subclass of it. But if it has an \"_inst\" attribute\n # then it for sure is a _GenericAlias\n if hasattr(attr, \"_inst\") and attr.__origin__ is type_:\n return attr\n\n raise AssertionError(f\"No GenericAlias available for {type_}\") # pragma: no cover\n\n\ndef is_list(annotation: object) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list\n\n\ndef is_union(annotation: object) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n # this check is needed because unions declared with the new syntax `A | B`\n # don't have a `__origin__` property on them, but they are instances of\n # `UnionType`, which is only available in Python 3.10+\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if isinstance(annotation, UnionType):\n return True\n\n # unions declared as Union[A, B] fall through to this check, even on python 3.10+\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == Union\n\n\ndef is_optional(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n\n if not is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any(x == None.__class__ for x in types)\n\n\ndef get_optional_annotation(annotation: Type) -> Type:\n types = annotation.__args__\n\n non_none_types = tuple(x for x in types if x != None.__class__)\n\n # if we have multiple non none types we want to return a copy of this\n # type (normally a Union type).\n\n if len(non_none_types) > 1:\n return annotation.copy_with(non_none_types)\n\n return non_none_types[0]\n\n\ndef get_list_annotation(annotation: Type) -> Type:\n return annotation.__args__[0]\n\n\ndef is_concrete_generic(annotation: type) -> bool:\n ignored_generics = (list, tuple, Union, ClassVar, AsyncGenerator)\n return (\n isinstance(annotation, _GenericAlias)\n and annotation.__origin__ not in ignored_generics\n )\n\n\ndef is_generic_subclass(annotation: type) -> bool:\n return isinstance(annotation, type) and issubclass(\n annotation, Generic # type:ignore\n )\n\n\ndef is_generic(annotation: type) -> bool:\n \"\"\"Returns True if the annotation is or extends a generic.\"\"\"\n\n return (\n # TODO: These two lines appear to have the same effect. 
When will an\n # annotation have parameters but not satisfy the first condition?\n (is_generic_subclass(annotation) or is_concrete_generic(annotation))\n and bool(get_parameters(annotation))\n )\n\n\ndef is_type_var(annotation: Type) -> bool:\n \"\"\"Returns True if the annotation is a TypeVar.\"\"\"\n\n return isinstance(annotation, TypeVar)\n\n\ndef get_parameters(annotation: Type) -> Union[Tuple[object], Tuple[()]]:\n if (\n isinstance(annotation, _GenericAlias)\n or isinstance(annotation, type)\n and issubclass(annotation, Generic) # type:ignore\n and annotation is not Generic\n ):\n return annotation.__parameters__\n else:\n return () # pragma: no cover\n\n\n@overload\ndef _ast_replace_union_operation(expr: ast.expr) -> ast.expr:\n ...\n\n\n@overload\ndef _ast_replace_union_operation(expr: ast.Expr) -> ast.Expr:\n ...\n\n\ndef _ast_replace_union_operation(\n expr: Union[ast.Expr, ast.expr]\n) -> Union[ast.Expr, ast.expr]:\n if isinstance(expr, ast.Expr) and isinstance(\n expr.value, (ast.BinOp, ast.Subscript)\n ):\n expr = ast.Expr(_ast_replace_union_operation(expr.value))\n elif isinstance(expr, ast.BinOp):\n left = _ast_replace_union_operation(expr.left)\n right = _ast_replace_union_operation(expr.right)\n expr = ast.Subscript(\n ast.Name(id=\"Union\"),\n ast.Tuple([left, right], ast.Load()),\n ast.Load(),\n )\n elif isinstance(expr, ast.Tuple):\n expr = ast.Tuple(\n [_ast_replace_union_operation(elt) for elt in expr.elts],\n ast.Load(),\n )\n elif isinstance(expr, ast.Subscript):\n if hasattr(ast, \"Index\") and isinstance(expr.slice, ast.Index):\n expr = ast.Subscript(\n expr.value,\n # The cast is required for mypy on python 3.7 and 3.8\n ast.Index(_ast_replace_union_operation(cast(Any, expr.slice).value)),\n ast.Load(),\n )\n elif isinstance(expr.slice, (ast.BinOp, ast.Tuple)):\n expr = ast.Subscript(\n expr.value,\n _ast_replace_union_operation(expr.slice),\n ast.Load(),\n )\n\n return expr\n\n\ndef eval_type(\n type_: Any,\n globalns: Optional[Dict] = None,\n localns: Optional[Dict] = None,\n) -> Type:\n \"\"\"Evaluates a type, resolving forward references.\"\"\"\n from strawberry.auto import StrawberryAuto\n from strawberry.lazy_type import StrawberryLazyReference\n from strawberry.private import StrawberryPrivate\n\n globalns = globalns or {}\n # If this is not a string, maybe its args are (e.g. 
List[\"Foo\"])\n if isinstance(type_, ForwardRef):\n # For Python 3.10+, we can use the built-in _eval_type function directly.\n # It will handle \"|\" notations properly\n if sys.version_info < (3, 10):\n parsed = _ast_replace_union_operation(\n cast(ast.Expr, ast.parse(type_.__forward_arg__).body[0])\n )\n\n # We replaced \"a | b\" with \"Union[a, b], so make sure Union can be resolved\n # at globalns because it may not be there\n if \"Union\" not in globalns:\n globalns[\"Union\"] = Union\n\n assert ast_unparse\n type_ = ForwardRef(ast_unparse(parsed))\n\n return _eval_type(type_, globalns, localns)\n\n origin = get_origin(type_)\n if origin is not None:\n args = get_args(type_)\n if origin is Annotated:\n for arg in args[1:]:\n if isinstance(arg, StrawberryPrivate):\n return type_\n\n if isinstance(arg, StrawberryLazyReference):\n remaining_args = [\n a\n for a in args[1:]\n if not isinstance(arg, StrawberryLazyReference)\n ]\n args = (arg.resolve_forward_ref(args[0]), *remaining_args)\n break\n if isinstance(arg, StrawberryAuto):\n remaining_args = [\n a for a in args[1:] if not isinstance(arg, StrawberryAuto)\n ]\n args = (arg, *remaining_args)\n break\n\n # If we have only a StrawberryLazyReference and no more annotations,\n # we need to return the argument directly because Annotated\n # will raise an error if trying to instantiate it with only\n # one argument.\n if len(args) == 1:\n return args[0]\n\n # python 3.10 will return UnionType for origin, and it cannot be\n # subscripted like Union[Foo, Bar]\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if origin is UnionType:\n origin = Union\n\n # Future annotations in older versions will eval generic aliases to their\n # real types (i.e. List[foo] will have its origin set to list instead\n # of List). If that type is not subscriptable, retrieve its generic\n # alias version instead.\n if sys.version_info < (3, 9) and not hasattr(origin, \"__class_getitem__\"):\n origin = get_generic_alias(origin)\n\n type_ = (\n origin[tuple(eval_type(a, globalns, localns) for a in args)]\n if args\n else origin\n )\n\n return type_\n\n\n_T = TypeVar(\"_T\")\n\n\ndef __dataclass_transform__(\n *,\n eq_default: bool = True,\n order_default: bool = False,\n kw_only_default: bool = False,\n field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),\n) -> Callable[[_T], _T]:\n return lambda a: a\n", "path": "strawberry/utils/typing.py"}]}
| 3,805 | 554 |
gh_patches_debug_22417
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-537
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unicode error: python 2 + merge conflict + non-ascii commit message
The important part of the stack:
```
File "...python2.7/site-packages/pre_commit/commands/run.py", line 52, in get_filenames
return getter(include_expr, exclude_expr)
File "...python2.7/site-packages/pre_commit/util.py", line 46, in wrapper
ret = wrapper._cache[key] = func(*args)
File "...python2.7/site-packages/pre_commit/git.py", line 98, in wrapper
for filename in all_file_list_strategy()
File "...python2.7/site-packages/pre_commit/util.py", line 46, in wrapper
ret = wrapper._cache[key] = func(*args)
File "...python2.7/site-packages/pre_commit/git.py", line 64, in get_conflicted_files
merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
File "...python2.7/site-packages/pre_commit/git.py", line 54, in parse_merge_msg_for_conflicts
if line.startswith(('\t', '#\t'))
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 37: ordinal not in range(128)
```
An easy fix: https://github.com/pre-commit/pre-commit/blob/e3b14c35f782ed464e3f96b44e8509048187689f/pre_commit/git.py#L63
</issue>
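One possible remedy, sketched below purely as an illustration (the patch recorded for this entry is not reproduced here): read `MERGE_MSG` with an explicit encoding so the parsed lines are text on both Python 2 and 3 before `startswith` is applied.
```python
# Illustrative sketch, not necessarily the recorded fix: decode MERGE_MSG
# explicitly instead of relying on Python 2's default ASCII codec.
import io
import os.path


def read_merge_msg(git_dir):
    with io.open(os.path.join(git_dir, 'MERGE_MSG'), encoding='UTF-8') as f:
        return f.read()
```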
<code>
[start of pre_commit/git.py]
1 from __future__ import unicode_literals
2
3 import functools
4 import logging
5 import os.path
6 import re
7 import sys
8
9 from pre_commit.errors import FatalError
10 from pre_commit.util import CalledProcessError
11 from pre_commit.util import cmd_output
12 from pre_commit.util import memoize_by_cwd
13
14
15 logger = logging.getLogger('pre_commit')
16
17
18 def get_root():
19 try:
20 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
21 except CalledProcessError:
22 raise FatalError(
23 'git failed. Is it installed, and are you in a Git repository '
24 'directory?'
25 )
26
27
28 def get_git_dir(git_root):
29 return os.path.normpath(os.path.join(
30 git_root,
31 cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),
32 ))
33
34
35 def get_remote_url(git_root):
36 ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
37 return ret.strip()
38
39
40 def is_in_merge_conflict():
41 git_dir = get_git_dir('.')
42 return (
43 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
44 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
45 )
46
47
48 def parse_merge_msg_for_conflicts(merge_msg):
49 # Conflicted files start with tabs
50 return [
51 line.lstrip('#').strip()
52 for line in merge_msg.splitlines()
53 # '#\t' for git 2.4.1
54 if line.startswith(('\t', '#\t'))
55 ]
56
57
58 @memoize_by_cwd
59 def get_conflicted_files():
60 logger.info('Checking merge-conflict files only.')
61 # Need to get the conflicted files from the MERGE_MSG because they could
62 # have resolved the conflict by choosing one side or the other
63 merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
64 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
65
66 # This will get the rest of the changes made after the merge.
67 # If they resolved the merge conflict by choosing a mesh of both sides
68 # this will also include the conflicted files
69 tree_hash = cmd_output('git', 'write-tree')[1].strip()
70 merge_diff_filenames = cmd_output(
71 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',
72 )[1].splitlines()
73 return set(merge_conflict_filenames) | set(merge_diff_filenames)
74
75
76 @memoize_by_cwd
77 def get_staged_files():
78 return cmd_output(
79 'git', 'diff', '--staged', '--name-only',
80 # Everything except for D
81 '--diff-filter=ACMRTUXB'
82 )[1].splitlines()
83
84
85 @memoize_by_cwd
86 def get_all_files():
87 return cmd_output('git', 'ls-files')[1].splitlines()
88
89
90 def get_files_matching(all_file_list_strategy):
91 @functools.wraps(all_file_list_strategy)
92 @memoize_by_cwd
93 def wrapper(include_expr, exclude_expr):
94 include_regex = re.compile(include_expr)
95 exclude_regex = re.compile(exclude_expr)
96 return {
97 filename
98 for filename in all_file_list_strategy()
99 if (
100 include_regex.search(filename) and
101 not exclude_regex.search(filename) and
102 os.path.lexists(filename)
103 )
104 }
105 return wrapper
106
107
108 get_staged_files_matching = get_files_matching(get_staged_files)
109 get_all_files_matching = get_files_matching(get_all_files)
110 get_conflicted_files_matching = get_files_matching(get_conflicted_files)
111
112
113 def check_for_cygwin_mismatch():
114 """See https://github.com/pre-commit/pre-commit/issues/354"""
115 if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
116 is_cygwin_python = sys.platform == 'cygwin'
117 toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
118 is_cygwin_git = toplevel.startswith('/')
119
120 if is_cygwin_python ^ is_cygwin_git:
121 exe_type = {True: '(cygwin)', False: '(windows)'}
122 logger.warn(
123 'pre-commit has detected a mix of cygwin python / git\n'
124 'This combination is not supported, it is likely you will '
125 'receive an error later in the program.\n'
126 'Make sure to use cygwin git+python while using cygwin\n'
127 'These can be installed through the cygwin installer.\n'
128 ' - python {}\n'
129 ' - git {}\n'.format(
130 exe_type[is_cygwin_python],
131 exe_type[is_cygwin_git],
132 )
133 )
134
[end of pre_commit/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -48,10 +48,10 @@
def parse_merge_msg_for_conflicts(merge_msg):
# Conflicted files start with tabs
return [
- line.lstrip('#').strip()
+ line.lstrip(b'#').strip().decode('UTF-8')
for line in merge_msg.splitlines()
# '#\t' for git 2.4.1
- if line.startswith(('\t', '#\t'))
+ if line.startswith((b'\t', b'#\t'))
]
@@ -60,7 +60,7 @@
logger.info('Checking merge-conflict files only.')
# Need to get the conflicted files from the MERGE_MSG because they could
# have resolved the conflict by choosing one side or the other
- merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()
merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
# This will get the rest of the changes made after the merge.
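A minimal standalone sketch of the approach in the diff above: read `MERGE_MSG` as raw bytes and decode each conflicted filename explicitly as UTF-8, so the comparison never triggers Python 2's implicit ASCII decode. The sample merge message below is hypothetical.

```python
def parse_merge_msg_for_conflicts(merge_msg_bytes):
    # Conflicted files start with '\t' ('#\t' for git >= 2.4.1); comparing
    # bytes against bytes avoids the implicit ASCII decode on Python 2.
    return [
        line.lstrip(b'#').strip().decode('UTF-8')
        for line in merge_msg_bytes.splitlines()
        if line.startswith((b'\t', b'#\t'))
    ]


# Hypothetical merge message with a non-ASCII filename (UTF-8 encoded).
msg = b'Merge branch "topic"\n#\tCaf\xc3\xa9.txt\n'
assert parse_merge_msg_for_conflicts(msg) == [u'Caf\xe9.txt']
```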
|
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -48,10 +48,10 @@\n def parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n- line.lstrip('#').strip()\n+ line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n- if line.startswith(('\\t', '#\\t'))\n+ if line.startswith((b'\\t', b'#\\t'))\n ]\n \n \n@@ -60,7 +60,7 @@\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n- merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n \n # This will get the rest of the changes made after the merge.\n", "issue": "Unicode error: python 2 + merge conflict + non-ascii commit message\nThe important part of the stack:\r\n\r\n```\r\n File \"...python2.7/site-packages/pre_commit/commands/run.py\", line 52, in get_filenames\r\n return getter(include_expr, exclude_expr)\r\n File \"...python2.7/site-packages/pre_commit/util.py\", line 46, in wrapper\r\n ret = wrapper._cache[key] = func(*args)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 98, in wrapper\r\n for filename in all_file_list_strategy()\r\n File \"...python2.7/site-packages/pre_commit/util.py\", line 46, in wrapper\r\n ret = wrapper._cache[key] = func(*args)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 64, in get_conflicted_files\r\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 54, in parse_merge_msg_for_conflicts\r\n if line.startswith(('\\t', '#\\t'))\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 37: ordinal not in range(128)\r\n```\r\n\r\nAn easy fix: https://github.com/pre-commit/pre-commit/blob/e3b14c35f782ed464e3f96b44e8509048187689f/pre_commit/git.py#L63\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\nimport sys\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return {\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n }\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python],\n exe_type[is_cygwin_git],\n )\n )\n", "path": "pre_commit/git.py"}]}
| 2,214 | 277 |
gh_patches_debug_25875
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2292
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invalid query parameters cause internal server error
```
File "akvo/rest/filters.py", line 61, in filter_queryset
queryset = getattr(queryset, param)(**args_or_kwargs)
```
In the new API filtering, you get an internal server error when the query is not correct, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}.
We should catch this and return the error message in a dict with `detail` as the key, e.g. `{detail: 'Related Field got invalid lookup: incorrect'}`.
</issue>
<code>
[start of akvo/rest/filters.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo Reporting is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import ast
8
9 from django.db.models import Q
10
11 from rest_framework import filters
12
13
14 class RSRGenericFilterBackend(filters.BaseFilterBackend):
15
16 def filter_queryset(self, request, queryset, view):
17 """
18 Return a queryset possibly filtered by query param values.
19 The filter looks for the query param keys filter and exclude
20 For each of these query param the value is evaluated using ast.literal_eval() and used as
21 kwargs in queryset.filter and queryset.exclude respectively.
22
23 Example URLs:
24 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}
25 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}
26
27 It's also possible to specify models to be included in select_related() and
28 prefetch_related() calls on the queryset, but specifying these in lists of strings as the
29 values for the query sting params select_relates and prefetch_related.
30
31 Example:
32 http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']
33
34 Finally limited support for filtering on multiple arguments using logical OR between
35 those expressions is available. To use this supply two or more query string keywords on the
36 form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q
37 object. All those Q objects created are used in a queryset.filter() call concatenated using
38 the | operator.
39 """
40 def eval_query_value(request, key):
41 """
42 Use ast.literal_eval() to evaluate a query string value as a python data type object
43 :param request: the django request object
44 :param param: the query string param key
45 :return: a python data type object, or None if literal_eval() fails
46 """
47 value = request.QUERY_PARAMS.get(key, None)
48 try:
49 return ast.literal_eval(value)
50 except ValueError:
51 return None
52
53 qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']
54
55 # evaluate each query string param, and apply the queryset method with the same name
56 for param in qs_params:
57 args_or_kwargs = eval_query_value(request, param)
58 if args_or_kwargs:
59 # filter and exclude are called with a dict kwarg, the _related methods with a list
60 if param in ['filter', 'exclude',]:
61 queryset = getattr(queryset, param)(**args_or_kwargs)
62 else:
63 queryset = getattr(queryset, param)(*args_or_kwargs)
64
65 # support for Q expressions, limited to OR-concatenated filtering
66 if request.QUERY_PARAMS.get('q_filter1', None):
67 i = 1
68 q_queries = []
69 while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):
70 query_arg = eval_query_value(request, 'q_filter{}'.format(i))
71 if query_arg:
72 q_queries += [query_arg]
73 i += 1
74
75 q_expr = Q(**q_queries[0])
76 for query in q_queries[1:]:
77 q_expr = q_expr | Q(**query)
78
79 queryset = queryset.filter(q_expr)
80
81 return queryset
82
[end of akvo/rest/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py
--- a/akvo/rest/filters.py
+++ b/akvo/rest/filters.py
@@ -7,8 +7,10 @@
import ast
from django.db.models import Q
+from django.core.exceptions import FieldError
-from rest_framework import filters
+from rest_framework import filters, status
+from rest_framework.exceptions import APIException
class RSRGenericFilterBackend(filters.BaseFilterBackend):
@@ -57,10 +59,14 @@
args_or_kwargs = eval_query_value(request, param)
if args_or_kwargs:
# filter and exclude are called with a dict kwarg, the _related methods with a list
- if param in ['filter', 'exclude',]:
- queryset = getattr(queryset, param)(**args_or_kwargs)
- else:
- queryset = getattr(queryset, param)(*args_or_kwargs)
+ try:
+ if param in ['filter', 'exclude',]:
+ queryset = getattr(queryset, param)(**args_or_kwargs)
+ else:
+ queryset = getattr(queryset, param)(*args_or_kwargs)
+
+ except FieldError as e:
+ raise APIException("Error in request: {message}".format(message=e.message))
# support for Q expressions, limited to OR-concatenated filtering
if request.QUERY_PARAMS.get('q_filter1', None):
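The design choice here is to translate Django's `FieldError` into a DRF `APIException`, so an invalid lookup comes back to the client as a `{"detail": ...}` response instead of an unhandled 500. A self-contained sketch of that guard follows; the exception classes are stand-ins so the snippet runs without Django or DRF installed.

```python
class FieldError(Exception):
    """Stand-in for django.core.exceptions.FieldError (illustrative only)."""


class APIException(Exception):
    """Stand-in for rest_framework.exceptions.APIException (illustrative only)."""


def apply_queryset_method(queryset, param, args_or_kwargs):
    # Same guard as the patch: an invalid lookup becomes a client-facing error.
    try:
        if param in ('filter', 'exclude'):
            return getattr(queryset, param)(**args_or_kwargs)
        return getattr(queryset, param)(*args_or_kwargs)
    except FieldError as e:
        # The patch uses e.message (a Python 2 idiom); str(e) is the portable spelling.
        raise APIException("Error in request: {message}".format(message=str(e)))


class BrokenQuerySet:
    def filter(self, **kwargs):
        raise FieldError("Related Field got invalid lookup: incorrect")


try:
    apply_queryset_method(BrokenQuerySet(), 'filter', {'incorrect': 'code'})
except APIException as exc:
    assert "invalid lookup" in str(exc)
```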
|
{"golden_diff": "diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py\n--- a/akvo/rest/filters.py\n+++ b/akvo/rest/filters.py\n@@ -7,8 +7,10 @@\n import ast\n \n from django.db.models import Q\n+from django.core.exceptions import FieldError\n \n-from rest_framework import filters\n+from rest_framework import filters, status\n+from rest_framework.exceptions import APIException\n \n \n class RSRGenericFilterBackend(filters.BaseFilterBackend):\n@@ -57,10 +59,14 @@\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n- if param in ['filter', 'exclude',]:\n- queryset = getattr(queryset, param)(**args_or_kwargs)\n- else:\n- queryset = getattr(queryset, param)(*args_or_kwargs)\n+ try:\n+ if param in ['filter', 'exclude',]:\n+ queryset = getattr(queryset, param)(**args_or_kwargs)\n+ else:\n+ queryset = getattr(queryset, param)(*args_or_kwargs)\n+\n+ except FieldError as e:\n+ raise APIException(\"Error in request: {message}\".format(message=e.message))\n \n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n", "issue": "Invalid query parameters cause internal server error\n```\n File \"akvo/rest/filters.py\", line 61, in filter_queryset\n queryset = getattr(queryset, param)(**args_or_kwargs)\n```\n\nIn the new API filtering, you get an internal server error when the query is not correct, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}. \n\nWe should catch this and return the error message in a dict with detail as key. E.g. `{detail: 'Related Field got invalid lookup: incorrect'}`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport ast\n\nfrom django.db.models import Q\n\nfrom rest_framework import filters\n\n\nclass RSRGenericFilterBackend(filters.BaseFilterBackend):\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a queryset possibly filtered by query param values.\n The filter looks for the query param keys filter and exclude\n For each of these query param the value is evaluated using ast.literal_eval() and used as\n kwargs in queryset.filter and queryset.exclude respectively.\n\n Example URLs:\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}\n\n It's also possible to specify models to be included in select_related() and\n prefetch_related() calls on the queryset, but specifying these in lists of strings as the\n values for the query sting params select_relates and prefetch_related.\n\n Example:\n http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']\n\n Finally limited support for filtering on multiple arguments using logical OR between\n those expressions is available. To use this supply two or more query string keywords on the\n form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q\n object. 
All those Q objects created are used in a queryset.filter() call concatenated using\n the | operator.\n \"\"\"\n def eval_query_value(request, key):\n \"\"\"\n Use ast.literal_eval() to evaluate a query string value as a python data type object\n :param request: the django request object\n :param param: the query string param key\n :return: a python data type object, or None if literal_eval() fails\n \"\"\"\n value = request.QUERY_PARAMS.get(key, None)\n try:\n return ast.literal_eval(value)\n except ValueError:\n return None\n\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']\n\n # evaluate each query string param, and apply the queryset method with the same name\n for param in qs_params:\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n if param in ['filter', 'exclude',]:\n queryset = getattr(queryset, param)(**args_or_kwargs)\n else:\n queryset = getattr(queryset, param)(*args_or_kwargs)\n\n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n i = 1\n q_queries = []\n while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):\n query_arg = eval_query_value(request, 'q_filter{}'.format(i))\n if query_arg:\n q_queries += [query_arg]\n i += 1\n\n q_expr = Q(**q_queries[0])\n for query in q_queries[1:]:\n q_expr = q_expr | Q(**query)\n\n queryset = queryset.filter(q_expr)\n\n return queryset\n", "path": "akvo/rest/filters.py"}]}
| 1,580 | 311 |
gh_patches_debug_14879
|
rasdani/github-patches
|
git_diff
|
pymeasure__pymeasure-547
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.10: ManagedWindow TypeError
Hi all,
I am trying to use pymeasure for the first time, but when running the "ManagedWindow" example I get the following error message as soon as I press the "queue" button in the GUI, and nothing is plotted.
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\manager.py in _update_progress(self, progress)
158 def _update_progress(self, progress):
159 if self.is_running():
--> 160 self._running_experiment.browser_item.setProgress(progress)
161
162 def _update_status(self, status):
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\browser.py in setProgress(self, progress)
73
74 def setProgress(self, progress):
---> 75 self.progressbar.setValue(progress)
76
77 class Browser(QtGui.QTreeWidget):
TypeError: setValue(self, int): argument 1 has unexpected type 'float'
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\manager.py in _finish(self)
253 experiment = self._running_experiment
254 self._clean_up()
--> 255 experiment.browser_item.setProgress(100.)
256 experiment.curve.update()
257 self.finished.emit(experiment)
C:\Program Files\Python 3.10\lib\site-packages\pymeasure\display\browser.py in setProgress(self, progress)
73
74 def setProgress(self, progress):
---> 75 self.progressbar.setValue(progress)
76
77 class Browser(QtGui.QTreeWidget):
TypeError: setValue(self, int): argument 1 has unexpected type 'float'
An exception has occurred, use %tb to see the full traceback.
```
I'm using Windows and Python 3.10.
Do you have an idea of what is wrong? It is probably something with the data type of `progress`, but what should I change?
Thanks for your help.
</issue>
<code>
[start of pymeasure/display/browser.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2021 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import logging
26
27 from os.path import basename
28
29 from .Qt import QtCore, QtGui
30 from ..experiment import Procedure
31
32 log = logging.getLogger(__name__)
33 log.addHandler(logging.NullHandler())
34
35
36 class BrowserItem(QtGui.QTreeWidgetItem):
37 """ Represent a row in the :class:`~pymeasure.display.browser.Browser` tree widget """
38
39 def __init__(self, results, color, parent=None):
40 super().__init__(parent)
41
42 pixelmap = QtGui.QPixmap(24, 24)
43 pixelmap.fill(color)
44 self.setIcon(0, QtGui.QIcon(pixelmap))
45 self.setFlags(self.flags() | QtCore.Qt.ItemIsUserCheckable)
46 self.setCheckState(0, QtCore.Qt.Checked)
47 self.setText(1, basename(results.data_filename))
48
49 self.setStatus(results.procedure.status)
50
51 self.progressbar = QtGui.QProgressBar()
52 self.progressbar.setRange(0, 100)
53 self.progressbar.setValue(0)
54
55 def setStatus(self, status):
56 status_label = {
57 Procedure.QUEUED: 'Queued', Procedure.RUNNING: 'Running',
58 Procedure.FAILED: 'Failed', Procedure.ABORTED: 'Aborted',
59 Procedure.FINISHED: 'Finished'}
60 self.setText(3, status_label[status])
61
62 if status == Procedure.FAILED or status == Procedure.ABORTED:
63 # Set progress bar color to red
64 return # Commented this out
65 self.progressbar.setStyleSheet("""
66 QProgressBar {
67 border: 1px solid #AAAAAA;
68 border-radius: 5px;
69 text-align: center;
70 }
71 QProgressBar::chunk {
72 background-color: red;
73 }
74 """)
75
76 def setProgress(self, progress):
77 self.progressbar.setValue(progress)
78
79
80 class Browser(QtGui.QTreeWidget):
81 """Graphical list view of :class:`Experiment<pymeasure.display.manager.Experiment>`
82 objects allowing the user to view the status of queued Experiments as well as
83 loading and displaying data from previous runs.
84
85 In order that different Experiments be displayed within the same Browser,
86 they must have entries in `DATA_COLUMNS` corresponding to the
87 `measured_quantities` of the Browser.
88 """
89
90 def __init__(self, procedure_class, display_parameters,
91 measured_quantities, sort_by_filename=False, parent=None):
92 super().__init__(parent)
93 self.display_parameters = display_parameters
94 self.procedure_class = procedure_class
95 self.measured_quantities = measured_quantities
96
97 header_labels = ["Graph", "Filename", "Progress", "Status"]
98 for parameter in self.display_parameters:
99 header_labels.append(getattr(self.procedure_class, parameter).name)
100
101 self.setColumnCount(len(header_labels))
102 self.setHeaderLabels(header_labels)
103 self.setSortingEnabled(True)
104 if sort_by_filename:
105 self.sortItems(1, QtCore.Qt.AscendingOrder)
106
107 for i, width in enumerate([80, 140]):
108 self.header().resizeSection(i, width)
109
110 def add(self, experiment):
111 """Add a :class:`Experiment<pymeasure.display.manager.Experiment>` object
112 to the Browser. This function checks to make sure that the Experiment
113 measures the appropriate quantities to warrant its inclusion, and then
114 adds a BrowserItem to the Browser, filling all relevant columns with
115 Parameter data.
116 """
117 experiment_parameters = experiment.procedure.parameter_objects()
118 experiment_parameter_names = list(experiment_parameters.keys())
119
120 for measured_quantity in self.measured_quantities:
121 if measured_quantity not in experiment.procedure.DATA_COLUMNS:
122 raise Exception("Procedure does not measure the"
123 " %s quantity." % measured_quantity)
124
125 # Set the relevant fields within the BrowserItem if
126 # that Parameter is implemented
127 item = experiment.browser_item
128 for i, column in enumerate(self.display_parameters):
129 if column in experiment_parameter_names:
130 item.setText(i + 4, str(experiment_parameters[column]))
131
132 self.addTopLevelItem(item)
133 self.setItemWidget(item, 2, item.progressbar)
134 return item
135
[end of pymeasure/display/browser.py]
[start of setup.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2021 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25
26 from setuptools import setup, find_packages
27
28 setup(
29 name='PyMeasure',
30 version='0.9.0',
31 author='PyMeasure Developers',
32 packages=find_packages(),
33 scripts=[],
34 url='https://github.com/pymeasure/pymeasure',
35 download_url='https://github.com/pymeasure/pymeasure/tarball/v0.9.0',
36 license='MIT License',
37 description='Scientific measurement library for instruments, experiments, and live-plotting',
38 long_description=open('README.rst').read() + "\n\n" + open('CHANGES.txt').read(),
39 install_requires=[
40 "numpy >= 1.6.1",
41 "pandas >= 0.14",
42 "pyvisa >= 1.8",
43 "pyserial >= 2.7",
44 "pyqtgraph >= 0.9.10"
45 ],
46 extras_require={
47 'matplotlib': ['matplotlib >= 2.0.2'],
48 'tcp': [
49 'pyzmq >= 16.0.2',
50 'cloudpickle >= 0.3.1'
51 ],
52 'python-vxi11': ['python-vxi11 >= 0.9']
53 },
54 setup_requires=[
55 'pytest-runner'
56 ],
57 tests_require=[
58 'pytest >= 2.9.1',
59 'pytest-qt >= 2.4.0',
60 'pyvisa-sim >= 0.4.0',
61 ],
62 classifiers=[
63 "Development Status :: 4 - Beta",
64 "Intended Audience :: Science/Research",
65 "License :: OSI Approved :: MIT License",
66 "Operating System :: MacOS",
67 "Operating System :: Microsoft :: Windows",
68 "Operating System :: POSIX",
69 "Operating System :: Unix",
70 "Programming Language :: Python :: 3 :: Only",
71 "Programming Language :: Python :: 3.7",
72 "Programming Language :: Python :: 3.8",
73 "Programming Language :: Python :: 3.9",
74 "Topic :: Scientific/Engineering",
75 ],
76 keywords="measure instrument experiment control automate graph plot"
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pymeasure/display/browser.py b/pymeasure/display/browser.py
--- a/pymeasure/display/browser.py
+++ b/pymeasure/display/browser.py
@@ -74,7 +74,7 @@
""")
def setProgress(self, progress):
- self.progressbar.setValue(progress)
+ self.progressbar.setValue(int(progress))
class Browser(QtGui.QTreeWidget):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -71,6 +71,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
],
keywords="measure instrument experiment control automate graph plot"
|
{"golden_diff": "diff --git a/pymeasure/display/browser.py b/pymeasure/display/browser.py\n--- a/pymeasure/display/browser.py\n+++ b/pymeasure/display/browser.py\n@@ -74,7 +74,7 @@\n \"\"\")\n \n def setProgress(self, progress):\n- self.progressbar.setValue(progress)\n+ self.progressbar.setValue(int(progress))\n \n \n class Browser(QtGui.QTreeWidget):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,6 +71,7 @@\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n", "issue": "Python 3.10: ManagedWindow TypeError\nHi all,\r\nI am trying to use pymeasure for the first time. But when running the example \"ManagedWindow\" I get the following error message as soon as I press the \"queue\" button in the GUI and nothing will be plotted. \r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\manager.py in _update_progress(self, progress)\r\n 158 def _update_progress(self, progress):\r\n 159 if self.is_running():\r\n--> 160 self._running_experiment.browser_item.setProgress(progress)\r\n 161 \r\n 162 def _update_status(self, status):\r\n\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\browser.py in setProgress(self, progress)\r\n 73 \r\n 74 def setProgress(self, progress):\r\n---> 75 self.progressbar.setValue(progress)\r\n 76 \r\n 77 class Browser(QtGui.QTreeWidget):\r\n\r\nTypeError: setValue(self, int): argument 1 has unexpected type 'float'---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\manager.py in _finish(self)\r\n 253 experiment = self._running_experiment\r\n 254 self._clean_up()\r\n--> 255 experiment.browser_item.setProgress(100.)\r\n 256 experiment.curve.update()\r\n 257 self.finished.emit(experiment)\r\n\r\nC:\\Program Files\\Python 3.10\\lib\\site-packages\\pymeasure\\display\\browser.py in setProgress(self, progress)\r\n 73 \r\n 74 def setProgress(self, progress):\r\n---> 75 self.progressbar.setValue(progress)\r\n 76 \r\n 77 class Browser(QtGui.QTreeWidget):\r\n\r\nTypeError: setValue(self, int): argument 1 has unexpected type 'float'An exception has occurred, use %tb to see the full traceback.\r\n```\r\n\r\nI'm using Windows and Python 3.10.\r\n\r\nDo you have an idea what is wrong? Probably something with the data type of progress. But what should I change? 
\r\nThanks for your help.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nfrom os.path import basename\n\nfrom .Qt import QtCore, QtGui\nfrom ..experiment import Procedure\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass BrowserItem(QtGui.QTreeWidgetItem):\n \"\"\" Represent a row in the :class:`~pymeasure.display.browser.Browser` tree widget \"\"\"\n\n def __init__(self, results, color, parent=None):\n super().__init__(parent)\n\n pixelmap = QtGui.QPixmap(24, 24)\n pixelmap.fill(color)\n self.setIcon(0, QtGui.QIcon(pixelmap))\n self.setFlags(self.flags() | QtCore.Qt.ItemIsUserCheckable)\n self.setCheckState(0, QtCore.Qt.Checked)\n self.setText(1, basename(results.data_filename))\n\n self.setStatus(results.procedure.status)\n\n self.progressbar = QtGui.QProgressBar()\n self.progressbar.setRange(0, 100)\n self.progressbar.setValue(0)\n\n def setStatus(self, status):\n status_label = {\n Procedure.QUEUED: 'Queued', Procedure.RUNNING: 'Running',\n Procedure.FAILED: 'Failed', Procedure.ABORTED: 'Aborted',\n Procedure.FINISHED: 'Finished'}\n self.setText(3, status_label[status])\n\n if status == Procedure.FAILED or status == Procedure.ABORTED:\n # Set progress bar color to red\n return # Commented this out\n self.progressbar.setStyleSheet(\"\"\"\n QProgressBar {\n border: 1px solid #AAAAAA;\n border-radius: 5px;\n text-align: center;\n }\n QProgressBar::chunk {\n background-color: red;\n }\n \"\"\")\n\n def setProgress(self, progress):\n self.progressbar.setValue(progress)\n\n\nclass Browser(QtGui.QTreeWidget):\n \"\"\"Graphical list view of :class:`Experiment<pymeasure.display.manager.Experiment>`\n objects allowing the user to view the status of queued Experiments as well as\n loading and displaying data from previous runs.\n\n In order that different Experiments be displayed within the same Browser,\n they must have entries in `DATA_COLUMNS` corresponding to the\n `measured_quantities` of the Browser.\n \"\"\"\n\n def __init__(self, procedure_class, display_parameters,\n measured_quantities, sort_by_filename=False, parent=None):\n super().__init__(parent)\n self.display_parameters = display_parameters\n self.procedure_class = procedure_class\n self.measured_quantities = measured_quantities\n\n header_labels = [\"Graph\", \"Filename\", \"Progress\", \"Status\"]\n for parameter in 
self.display_parameters:\n header_labels.append(getattr(self.procedure_class, parameter).name)\n\n self.setColumnCount(len(header_labels))\n self.setHeaderLabels(header_labels)\n self.setSortingEnabled(True)\n if sort_by_filename:\n self.sortItems(1, QtCore.Qt.AscendingOrder)\n\n for i, width in enumerate([80, 140]):\n self.header().resizeSection(i, width)\n\n def add(self, experiment):\n \"\"\"Add a :class:`Experiment<pymeasure.display.manager.Experiment>` object\n to the Browser. This function checks to make sure that the Experiment\n measures the appropriate quantities to warrant its inclusion, and then\n adds a BrowserItem to the Browser, filling all relevant columns with\n Parameter data.\n \"\"\"\n experiment_parameters = experiment.procedure.parameter_objects()\n experiment_parameter_names = list(experiment_parameters.keys())\n\n for measured_quantity in self.measured_quantities:\n if measured_quantity not in experiment.procedure.DATA_COLUMNS:\n raise Exception(\"Procedure does not measure the\"\n \" %s quantity.\" % measured_quantity)\n\n # Set the relevant fields within the BrowserItem if\n # that Parameter is implemented\n item = experiment.browser_item\n for i, column in enumerate(self.display_parameters):\n if column in experiment_parameter_names:\n item.setText(i + 4, str(experiment_parameters[column]))\n\n self.addTopLevelItem(item)\n self.setItemWidget(item, 2, item.progressbar)\n return item\n", "path": "pymeasure/display/browser.py"}, {"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='PyMeasure',\n version='0.9.0',\n author='PyMeasure Developers',\n packages=find_packages(),\n scripts=[],\n url='https://github.com/pymeasure/pymeasure',\n download_url='https://github.com/pymeasure/pymeasure/tarball/v0.9.0',\n license='MIT License',\n description='Scientific measurement library for instruments, experiments, and live-plotting',\n long_description=open('README.rst').read() + \"\\n\\n\" + open('CHANGES.txt').read(),\n install_requires=[\n \"numpy >= 1.6.1\",\n \"pandas >= 0.14\",\n \"pyvisa >= 1.8\",\n \"pyserial >= 2.7\",\n \"pyqtgraph >= 0.9.10\"\n ],\n extras_require={\n 'matplotlib': ['matplotlib >= 2.0.2'],\n 'tcp': [\n 'pyzmq >= 16.0.2',\n 'cloudpickle >= 0.3.1'\n ],\n 'python-vxi11': ['python-vxi11 >= 0.9']\n },\n setup_requires=[\n 'pytest-runner'\n ],\n tests_require=[\n 'pytest >= 2.9.1',\n 'pytest-qt >= 2.4.0',\n 'pyvisa-sim >= 0.4.0',\n ],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n ],\n keywords=\"measure instrument experiment control automate graph plot\"\n)\n", "path": "setup.py"}]}
| 3,344 | 188 |
gh_patches_debug_37662
|
rasdani/github-patches
|
git_diff
|
GPflow__GPflow-2063
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing documentation for SquaredExponential kernel
# Documentation
*Is there anything missing in the docs?*
Documentation of gpflow.kernels.stationaries.SquaredExponential went missing sometime after 2.4.0
It is there in [2.4.0](https://gpflow.github.io/GPflow/2.4.0/api/gpflow/kernels/index.html#gpflow-kernels-squaredexponential).
In [2.5.0](https://gpflow.github.io/GPflow/2.5.0/api/gpflow/kernels/index.html#gpflow-kernels-rbf) (and [currently](https://gpflow.github.io/GPflow/develop/api/gpflow/kernels/index.html#gpflow-kernels-rbf)), the entry was replaced by the RBF "as an alias of `gpflow.kernels.stationaries.SquaredExponential`", but there is no `SquaredExponential` entry.
</issue>
<code>
[start of doc/generate_module_rst.py]
1 # Copyright 2019 GPflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Script to autogenerate .rst files for autodocumentation of classes and modules in GPflow.
15 To be run by the CI system to update docs.
16 """
17 import inspect
18 from dataclasses import dataclass
19 from pathlib import Path
20 from types import ModuleType
21 from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union
22
23 from gpflow.utilities import Dispatcher
24
25 RST_LEVEL_SYMBOLS = ["=", "-", "~", '"', "'", "^"]
26
27 IGNORE_MODULES = {
28 "gpflow.covariances.dispatch",
29 "gpflow.conditionals.dispatch",
30 "gpflow.expectations.dispatch",
31 "gpflow.kullback_leiblers.dispatch",
32 "gpflow.versions",
33 }
34
35
36 def _header(header: str, level: int) -> str:
37 return f"{header}\n{RST_LEVEL_SYMBOLS[level] * len(header)}"
38
39
40 @dataclass
41 class DocumentableDispatcher:
42
43 name: str
44 obj: Dispatcher
45
46 def implementations(self) -> Mapping[Callable[..., Any], List[Type[Any]]]:
47 implementations: Dict[Callable[..., Any], List[Type[Any]]] = {}
48 for args, impl in self.obj.funcs.items():
49 implementations.setdefault(impl, []).append(args)
50 return implementations
51
52 def write(self, out: TextIO) -> None:
53 out.write(
54 f"""
55 {_header(self.name, 2)}
56
57 This function uses multiple dispatch, which will depend on the type of argument passed in:
58 """
59 )
60 for impl, argss in self.implementations().items():
61 impl_name = f"{impl.__module__}.{impl.__name__}"
62
63 out.write(
64 """
65 .. code-block:: python
66
67 """
68 )
69 for args in argss:
70 arg_names = ", ".join([a.__name__ for a in args])
71 out.write(f" {self.name}( {arg_names} )\n")
72 out.write(f" # dispatch to -> {impl_name}(...)\n")
73 out.write(
74 f"""
75 .. autofunction:: {impl_name}
76 """
77 )
78
79
80 @dataclass
81 class DocumentableClass:
82
83 name: str
84 obj: Type[Any]
85
86 def write(self, out: TextIO) -> None:
87 out.write(
88 f"""
89 {_header(self.name, 2)}
90
91 .. autoclass:: {self.name}
92 :show-inheritance:
93 :members:
94 """
95 )
96
97
98 @dataclass
99 class DocumentableFunction:
100
101 name: str
102 obj: Callable[..., Any]
103
104 def write(self, out: TextIO) -> None:
105 out.write(
106 f"""
107 {_header(self.name, 2)}
108
109 .. autofunction:: {self.name}
110 """
111 )
112
113
114 @dataclass
115 class DocumentableModule:
116
117 name: str
118 obj: ModuleType
119 modules: List["DocumentableModule"]
120 classes: List[DocumentableClass]
121 functions: List[Union[DocumentableDispatcher, DocumentableFunction]]
122
123 @staticmethod
124 def collect(
125 root: ModuleType,
126 ) -> "DocumentableModule":
127 root_name = root.__name__
128 exported_names = set(getattr(root, "__all__", []))
129
130 modules: List["DocumentableModule"] = []
131 classes: List[DocumentableClass] = []
132 functions: List[Union[DocumentableDispatcher, DocumentableFunction]] = []
133
134 for key in dir(root):
135 if key.startswith("_"):
136 continue
137
138 child = getattr(root, key)
139 child_name = root_name + "." + key
140 if child_name in IGNORE_MODULES:
141 continue
142
143 # pylint: disable=cell-var-from-loop
144 def _should_ignore(child: Union[Callable[..., Any], Type[Any]]) -> bool:
145 declared_in_root = child.__module__ == root_name
146 explicitly_exported = key in exported_names
147 return not (declared_in_root or explicitly_exported)
148
149 # pylint: enable=cell-var-from-loop
150
151 if isinstance(child, Dispatcher):
152 functions.append(DocumentableDispatcher(child_name, child))
153 elif inspect.ismodule(child):
154 if child.__name__ != child_name: # Ignore imports of modules.
155 continue
156 modules.append(DocumentableModule.collect(child))
157 elif inspect.isclass(child):
158 if _should_ignore(child):
159 continue
160 classes.append(DocumentableClass(child_name, child))
161 elif inspect.isfunction(child):
162 if _should_ignore(child):
163 continue
164 functions.append(DocumentableFunction(child_name, child))
165
166 return DocumentableModule(root_name, root, modules, classes, functions)
167
168 def seen_in_dispatchers(self, seen: Set[int]) -> None:
169 for module in self.modules:
170 module.seen_in_dispatchers(seen)
171 for function in self.functions:
172 if isinstance(function, DocumentableDispatcher):
173 impls = function.obj.funcs.values()
174 for impl in impls:
175 seen.add(id(impl))
176
177 def prune_duplicates(self) -> None:
178 seen: Set[int] = set()
179 self.seen_in_dispatchers(seen)
180
181 # Breadth-first search so that we prefer objects with shorter names.
182 todo = Deque([self])
183 while todo:
184 module = todo.popleft()
185
186 new_classes = []
187 for c in module.classes:
188 if id(c.obj) not in seen:
189 seen.add(id(c.obj))
190 new_classes.append(c)
191 module.classes = new_classes
192
193 new_functions = []
194 for f in module.functions:
195 if id(f.obj) not in seen:
196 seen.add(id(f.obj))
197 new_functions.append(f)
198 module.functions = new_functions
199
200 todo.extend(module.modules)
201
202 def prune_empty_modules(self) -> None:
203 new_modules = []
204 for m in self.modules:
205 m.prune_empty_modules()
206
207 if m.modules or m.classes or m.functions:
208 new_modules.append(m)
209 self.modules = new_modules
210
211 def prune(self) -> None:
212 self.prune_duplicates()
213 self.prune_empty_modules()
214
215 def write_modules(self, out: TextIO) -> None:
216 if not self.modules:
217 return
218
219 out.write(
220 f"""
221 {_header('Modules', 1)}
222
223 .. toctree::
224 :maxdepth: 1
225
226 """
227 )
228 for module in self.modules:
229 out.write(f" {module.name} <{module.name.split('.')[-1]}/index>\n")
230
231 def write_classes(self, out: TextIO) -> None:
232 if not self.classes:
233 return
234
235 out.write(
236 f"""
237 {_header('Classes', 1)}
238 """
239 )
240 for cls in self.classes:
241 cls.write(out)
242
243 def write_functions(self, out: TextIO) -> None:
244 if not self.functions:
245 return
246
247 out.write(
248 f"""
249 {_header('Functions', 1)}
250 """
251 )
252 for function in self.functions:
253 function.write(out)
254
255 def write(self, path: Path) -> None:
256 dir_path = path / f"{self.name.replace('.', '/')}"
257 dir_path.mkdir(parents=True, exist_ok=True)
258 index_path = dir_path / "index.rst"
259 with index_path.open("wt") as out:
260 print("Writing", index_path)
261 out.write(
262 f"""{_header(self.name, 0)}
263
264 .. THIS IS AN AUTOGENERATED RST FILE
265
266 .. automodule:: {self.name}
267 """
268 )
269 self.write_modules(out)
270 self.write_classes(out)
271 self.write_functions(out)
272
273 for module in self.modules:
274 module.write(path)
275
276 def str_into(self, indent: int, lines: List[str]) -> None:
277 lines.append(2 * indent * " " + "Module: " + self.name)
278 for module in self.modules:
279 module.str_into(indent + 1, lines)
280 for cls in self.classes:
281 lines.append(2 * (indent + 1) * " " + "Class: " + cls.name)
282 for function in self.functions:
283 lines.append(2 * (indent + 1) * " " + "Function: " + function.name)
284
285 def __str__(self) -> str:
286 lines: List[str] = []
287 self.str_into(0, lines)
288 return "\n".join(lines)
289
290
291 def generate_module_rst(module: ModuleType, dest: Path) -> None:
292 """
293 Traverses the given `module` and generates `.rst` files for Sphinx.
294 """
295 docs = DocumentableModule.collect(module)
296 docs.prune()
297 docs.write(dest)
298
[end of doc/generate_module_rst.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/doc/generate_module_rst.py b/doc/generate_module_rst.py
--- a/doc/generate_module_rst.py
+++ b/doc/generate_module_rst.py
@@ -18,7 +18,7 @@
from dataclasses import dataclass
from pathlib import Path
from types import ModuleType
-from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union
+from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Tuple, Type, Union
from gpflow.utilities import Dispatcher
@@ -165,17 +165,19 @@
return DocumentableModule(root_name, root, modules, classes, functions)
- def seen_in_dispatchers(self, seen: Set[int]) -> None:
+ def seen_in_dispatchers(self, seen: Set[Tuple[str, int]]) -> None:
for module in self.modules:
module.seen_in_dispatchers(seen)
for function in self.functions:
if isinstance(function, DocumentableDispatcher):
impls = function.obj.funcs.values()
for impl in impls:
- seen.add(id(impl))
+ # See comment below (for classes) about aliases.
+ key = (impl.__name__, id(impl))
+ seen.add(key)
def prune_duplicates(self) -> None:
- seen: Set[int] = set()
+ seen: Set[Tuple[str, int]] = set()
self.seen_in_dispatchers(seen)
# Breadth-first search so that we prefer objects with shorter names.
@@ -185,15 +187,20 @@
new_classes = []
for c in module.classes:
- if id(c.obj) not in seen:
- seen.add(id(c.obj))
+ # Account for objects to have aliases, hence include the object name in the key.
+ # We want to generate documentation for both the alias and the original object.
+ key = (c.name[c.name.rfind(".") + 1 :], id(c.obj))
+ if key not in seen:
+ seen.add(key)
new_classes.append(c)
module.classes = new_classes
new_functions = []
for f in module.functions:
- if id(f.obj) not in seen:
- seen.add(id(f.obj))
+ # See comment above (for classes) about aliases.
+ key = (f.name[f.name.rfind(".") + 1 :], id(f.obj))
+ if key not in seen:
+ seen.add(key)
new_functions.append(f)
module.functions = new_functions
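The patch changes the de-duplication key from `id(obj)` to the pair `(short name, id(obj))`, so an alias such as `RBF = SquaredExponential` is documented alongside the original class instead of being pruned as a duplicate. A small self-contained sketch of that rule; the class and module names are illustrative.

```python
class SquaredExponential:
    """Hypothetical kernel class, used only to illustrate the alias case."""


RBF = SquaredExponential  # alias: same object, two exported names

candidates = [
    ("gpflow.kernels.SquaredExponential", SquaredExponential),
    ("gpflow.kernels.RBF", RBF),
]

seen = set()
documented = []
for qualified_name, obj in candidates:
    # Key on (short name, identity), as in the patch, so aliases survive pruning.
    key = (qualified_name[qualified_name.rfind(".") + 1:], id(obj))
    if key not in seen:
        seen.add(key)
        documented.append(qualified_name)

assert documented == ["gpflow.kernels.SquaredExponential", "gpflow.kernels.RBF"]
```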
|
{"golden_diff": "diff --git a/doc/generate_module_rst.py b/doc/generate_module_rst.py\n--- a/doc/generate_module_rst.py\n+++ b/doc/generate_module_rst.py\n@@ -18,7 +18,7 @@\n from dataclasses import dataclass\n from pathlib import Path\n from types import ModuleType\n-from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union\n+from typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Tuple, Type, Union\n \n from gpflow.utilities import Dispatcher\n \n@@ -165,17 +165,19 @@\n \n return DocumentableModule(root_name, root, modules, classes, functions)\n \n- def seen_in_dispatchers(self, seen: Set[int]) -> None:\n+ def seen_in_dispatchers(self, seen: Set[Tuple[str, int]]) -> None:\n for module in self.modules:\n module.seen_in_dispatchers(seen)\n for function in self.functions:\n if isinstance(function, DocumentableDispatcher):\n impls = function.obj.funcs.values()\n for impl in impls:\n- seen.add(id(impl))\n+ # See comment below (for classes) about aliases.\n+ key = (impl.__name__, id(impl))\n+ seen.add(key)\n \n def prune_duplicates(self) -> None:\n- seen: Set[int] = set()\n+ seen: Set[Tuple[str, int]] = set()\n self.seen_in_dispatchers(seen)\n \n # Breadth-first search so that we prefer objects with shorter names.\n@@ -185,15 +187,20 @@\n \n new_classes = []\n for c in module.classes:\n- if id(c.obj) not in seen:\n- seen.add(id(c.obj))\n+ # Account for objects to have aliases, hence include the object name in the key.\n+ # We want to generate documentation for both the alias and the original object.\n+ key = (c.name[c.name.rfind(\".\") + 1 :], id(c.obj))\n+ if key not in seen:\n+ seen.add(key)\n new_classes.append(c)\n module.classes = new_classes\n \n new_functions = []\n for f in module.functions:\n- if id(f.obj) not in seen:\n- seen.add(id(f.obj))\n+ # See comment above (for classes) about aliases.\n+ key = (f.name[f.name.rfind(\".\") + 1 :], id(f.obj))\n+ if key not in seen:\n+ seen.add(key)\n new_functions.append(f)\n module.functions = new_functions\n", "issue": "Missing documentation for SquaredExponential kernel\n# Documentation\r\n\r\n*Is there anything missing in the docs?*\r\n\r\nDocumentation of gpflow.kernels.stationaries.SquaredExponential went missing sometime after 2.4.0\r\n\r\nIt is there in [2.4.0](https://gpflow.github.io/GPflow/2.4.0/api/gpflow/kernels/index.html#gpflow-kernels-squaredexponential).\r\n\r\nIn [2.5.0](https://gpflow.github.io/GPflow/2.5.0/api/gpflow/kernels/index.html#gpflow-kernels-rbf) (and [currently](https://gpflow.github.io/GPflow/develop/api/gpflow/kernels/index.html#gpflow-kernels-rbf)), the entry was replaced by the RBF \"as an alias of `gpflow.kernels.stationaries.SquaredExponential`\", but there is no `SquaredExponential` entry.\r\n\n", "before_files": [{"content": "# Copyright 2019 GPflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script to autogenerate .rst files for autodocumentation of classes and modules in GPflow.\nTo be run by the CI system to update docs.\n\"\"\"\nimport 
inspect\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import Any, Callable, Deque, Dict, List, Mapping, Set, TextIO, Type, Union\n\nfrom gpflow.utilities import Dispatcher\n\nRST_LEVEL_SYMBOLS = [\"=\", \"-\", \"~\", '\"', \"'\", \"^\"]\n\nIGNORE_MODULES = {\n \"gpflow.covariances.dispatch\",\n \"gpflow.conditionals.dispatch\",\n \"gpflow.expectations.dispatch\",\n \"gpflow.kullback_leiblers.dispatch\",\n \"gpflow.versions\",\n}\n\n\ndef _header(header: str, level: int) -> str:\n return f\"{header}\\n{RST_LEVEL_SYMBOLS[level] * len(header)}\"\n\n\n@dataclass\nclass DocumentableDispatcher:\n\n name: str\n obj: Dispatcher\n\n def implementations(self) -> Mapping[Callable[..., Any], List[Type[Any]]]:\n implementations: Dict[Callable[..., Any], List[Type[Any]]] = {}\n for args, impl in self.obj.funcs.items():\n implementations.setdefault(impl, []).append(args)\n return implementations\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\nThis function uses multiple dispatch, which will depend on the type of argument passed in:\n\"\"\"\n )\n for impl, argss in self.implementations().items():\n impl_name = f\"{impl.__module__}.{impl.__name__}\"\n\n out.write(\n \"\"\"\n.. code-block:: python\n\n\"\"\"\n )\n for args in argss:\n arg_names = \", \".join([a.__name__ for a in args])\n out.write(f\" {self.name}( {arg_names} )\\n\")\n out.write(f\" # dispatch to -> {impl_name}(...)\\n\")\n out.write(\n f\"\"\"\n.. autofunction:: {impl_name}\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableClass:\n\n name: str\n obj: Type[Any]\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\n.. autoclass:: {self.name}\n :show-inheritance:\n :members:\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableFunction:\n\n name: str\n obj: Callable[..., Any]\n\n def write(self, out: TextIO) -> None:\n out.write(\n f\"\"\"\n{_header(self.name, 2)}\n\n.. 
autofunction:: {self.name}\n\"\"\"\n )\n\n\n@dataclass\nclass DocumentableModule:\n\n name: str\n obj: ModuleType\n modules: List[\"DocumentableModule\"]\n classes: List[DocumentableClass]\n functions: List[Union[DocumentableDispatcher, DocumentableFunction]]\n\n @staticmethod\n def collect(\n root: ModuleType,\n ) -> \"DocumentableModule\":\n root_name = root.__name__\n exported_names = set(getattr(root, \"__all__\", []))\n\n modules: List[\"DocumentableModule\"] = []\n classes: List[DocumentableClass] = []\n functions: List[Union[DocumentableDispatcher, DocumentableFunction]] = []\n\n for key in dir(root):\n if key.startswith(\"_\"):\n continue\n\n child = getattr(root, key)\n child_name = root_name + \".\" + key\n if child_name in IGNORE_MODULES:\n continue\n\n # pylint: disable=cell-var-from-loop\n def _should_ignore(child: Union[Callable[..., Any], Type[Any]]) -> bool:\n declared_in_root = child.__module__ == root_name\n explicitly_exported = key in exported_names\n return not (declared_in_root or explicitly_exported)\n\n # pylint: enable=cell-var-from-loop\n\n if isinstance(child, Dispatcher):\n functions.append(DocumentableDispatcher(child_name, child))\n elif inspect.ismodule(child):\n if child.__name__ != child_name: # Ignore imports of modules.\n continue\n modules.append(DocumentableModule.collect(child))\n elif inspect.isclass(child):\n if _should_ignore(child):\n continue\n classes.append(DocumentableClass(child_name, child))\n elif inspect.isfunction(child):\n if _should_ignore(child):\n continue\n functions.append(DocumentableFunction(child_name, child))\n\n return DocumentableModule(root_name, root, modules, classes, functions)\n\n def seen_in_dispatchers(self, seen: Set[int]) -> None:\n for module in self.modules:\n module.seen_in_dispatchers(seen)\n for function in self.functions:\n if isinstance(function, DocumentableDispatcher):\n impls = function.obj.funcs.values()\n for impl in impls:\n seen.add(id(impl))\n\n def prune_duplicates(self) -> None:\n seen: Set[int] = set()\n self.seen_in_dispatchers(seen)\n\n # Breadth-first search so that we prefer objects with shorter names.\n todo = Deque([self])\n while todo:\n module = todo.popleft()\n\n new_classes = []\n for c in module.classes:\n if id(c.obj) not in seen:\n seen.add(id(c.obj))\n new_classes.append(c)\n module.classes = new_classes\n\n new_functions = []\n for f in module.functions:\n if id(f.obj) not in seen:\n seen.add(id(f.obj))\n new_functions.append(f)\n module.functions = new_functions\n\n todo.extend(module.modules)\n\n def prune_empty_modules(self) -> None:\n new_modules = []\n for m in self.modules:\n m.prune_empty_modules()\n\n if m.modules or m.classes or m.functions:\n new_modules.append(m)\n self.modules = new_modules\n\n def prune(self) -> None:\n self.prune_duplicates()\n self.prune_empty_modules()\n\n def write_modules(self, out: TextIO) -> None:\n if not self.modules:\n return\n\n out.write(\n f\"\"\"\n{_header('Modules', 1)}\n\n.. 
toctree::\n :maxdepth: 1\n\n\"\"\"\n )\n for module in self.modules:\n out.write(f\" {module.name} <{module.name.split('.')[-1]}/index>\\n\")\n\n def write_classes(self, out: TextIO) -> None:\n if not self.classes:\n return\n\n out.write(\n f\"\"\"\n{_header('Classes', 1)}\n\"\"\"\n )\n for cls in self.classes:\n cls.write(out)\n\n def write_functions(self, out: TextIO) -> None:\n if not self.functions:\n return\n\n out.write(\n f\"\"\"\n{_header('Functions', 1)}\n\"\"\"\n )\n for function in self.functions:\n function.write(out)\n\n def write(self, path: Path) -> None:\n dir_path = path / f\"{self.name.replace('.', '/')}\"\n dir_path.mkdir(parents=True, exist_ok=True)\n index_path = dir_path / \"index.rst\"\n with index_path.open(\"wt\") as out:\n print(\"Writing\", index_path)\n out.write(\n f\"\"\"{_header(self.name, 0)}\n\n.. THIS IS AN AUTOGENERATED RST FILE\n\n.. automodule:: {self.name}\n\"\"\"\n )\n self.write_modules(out)\n self.write_classes(out)\n self.write_functions(out)\n\n for module in self.modules:\n module.write(path)\n\n def str_into(self, indent: int, lines: List[str]) -> None:\n lines.append(2 * indent * \" \" + \"Module: \" + self.name)\n for module in self.modules:\n module.str_into(indent + 1, lines)\n for cls in self.classes:\n lines.append(2 * (indent + 1) * \" \" + \"Class: \" + cls.name)\n for function in self.functions:\n lines.append(2 * (indent + 1) * \" \" + \"Function: \" + function.name)\n\n def __str__(self) -> str:\n lines: List[str] = []\n self.str_into(0, lines)\n return \"\\n\".join(lines)\n\n\ndef generate_module_rst(module: ModuleType, dest: Path) -> None:\n \"\"\"\n Traverses the given `module` and generates `.rst` files for Sphinx.\n \"\"\"\n docs = DocumentableModule.collect(module)\n docs.prune()\n docs.write(dest)\n", "path": "doc/generate_module_rst.py"}]}
| 3,503 | 574 |
gh_patches_debug_38763
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-227
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
atexit module can't handle multiple instances
The `atexit` module uses a dictionary to keep track of the routines to call. This means that no routines can be registered more than once, for example with different arguments.
Additionally, this means that `pwnlib.atexit` does not guarantee order.
Per the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:
_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._
However, multiple registration of routines breaks `unregister` in its current incarnation. The suggested remedy for this is to return a unique ID for each `register` call that is passed to `unregister`, rather than passing in the function to `unregister`.
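A minimal sketch of the ID-based approach suggested above (illustrative only — the names and details here are assumptions, not pwnlib's actual API):

```python
import itertools
import threading

_lock = threading.Lock()
_counter = itertools.count()
_handlers = {}  # ident -> (func, args, kwargs)

def register(func, *args, **kwargs):
    # Every call gets a fresh identifier, so the same function can be
    # registered repeatedly with different arguments.
    with _lock:
        ident = next(_counter)
        _handlers[ident] = (func, args, kwargs)
    return ident

def unregister(ident):
    # Removes one registration; other registrations of the same function
    # are left untouched.
    _handlers.pop(ident, None)

def _run_handlers():
    # Monotonically increasing identifiers also give the reverse-registration
    # order that the stdlib atexit documents.
    for ident in sorted(_handlers, reverse=True):
        func, args, kwargs = _handlers[ident]
        func(*args, **kwargs)
```

Returning the identifier rather than the function is what makes repeated registration and selective unregistration compatible.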
</issue>
<code>
[start of pwnlib/atexit.py]
1 """
2 Replacement for the Python standard library's atexit.py.
3
4 Whereas the standard :mod:`atexit` module only defines :func:`atexit.register`,
5 this replacement module also defines :func:`unregister`.
6
7 This module also fixes a the issue that exceptions raised by an exit handler is
8 printed twice when the standard :mod:`atexit` is used.
9 """
10
11 import sys, traceback
12
13 __all__ = ['register', 'unregister']
14
15 _handlers = {}
16
17 def register(func, *args, **kwargs):
18 """register(func, *args, **kwargs)
19
20 Registers a function to be called when an unhandled exception occurs. The
21 function will be called with positional arguments `args` and keyword
22 arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
23
24 If `func` is already registered then `args` and `kwargs` will be updated.
25
26 This function can be used as a decorator::
27
28 def f():
29 ...
30 atexit.register(f)
31
32 is equivalent to this::
33
34 @atexit.register
35 def f():
36 ...
37
38 """
39 _handlers[func] = (args, kwargs)
40 return func
41
42 def unregister(func):
43 """unregister(func)
44
45 Remove `func` from the collection of registered functions. If `func` isn't
46 registered this is a no-op.
47 """
48 if func in _handlers:
49 del _handlers[func]
50
51 def _run_handlers():
52 """_run_handlers()
53
54 Run registered exit handlers. The order is arbitrary.
55
56 If a handler raises an exception, it will be printed but nothing else
57 happens, i.e. other handlers will be run and `sys.excepthook` will not be
58 called for that reason.
59 """
60 for func, (args, kwargs) in _handlers.items():
61 try:
62 func(*args, **kwargs)
63 except SystemExit:
64 pass
65 except:
66 # extract the current exception and rewind the traceback to where it
67 # originated
68 typ, val, tb = sys.exc_info()
69 traceback.print_exception(typ, val, tb.tb_next)
70
71 # if there's already an exitfunc registered be sure to run that too
72 if hasattr(sys, "exitfunc"):
73 register(sys.exitfunc)
74
75 sys.exitfunc = _run_handlers
76
[end of pwnlib/atexit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py
--- a/pwnlib/atexit.py
+++ b/pwnlib/atexit.py
@@ -8,58 +8,76 @@
printed twice when the standard :mod:`atexit` is used.
"""
-import sys, traceback
+import sys, traceback, threading
+from .context import context
__all__ = ['register', 'unregister']
+_lock = threading.Lock()
+_ident = 0
_handlers = {}
def register(func, *args, **kwargs):
"""register(func, *args, **kwargs)
- Registers a function to be called when an unhandled exception occurs. The
- function will be called with positional arguments `args` and keyword
- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
+ Registers a function to be called on program termination. The function will
+ be called with positional arguments `args` and keyword arguments `kwargs`,
+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will
+ be the one used when the handler is run.
- If `func` is already registered then `args` and `kwargs` will be updated.
+ E.g. to suppress logging output from an exit-handler one could write::
- This function can be used as a decorator::
+ with context.local(log_level = 'error'):
+ atexit.register(handler)
- def f():
- ...
- atexit.register(f)
+ An identifier is returned which can be used to unregister the exit-handler.
- is equivalent to this::
+ This function can be used as a decorator::
@atexit.register
- def f():
+ def handler():
...
- """
- _handlers[func] = (args, kwargs)
- return func
+ Notice however that this will bind ``handler`` to the identifier and not the
+ actual exit-handler. The exit-handler can then be unregistered with::
+
+ atexit.unregister(handler)
-def unregister(func):
- """unregister(func)
+ This function is thread safe.
- Remove `func` from the collection of registered functions. If `func` isn't
- registered this is a no-op.
"""
- if func in _handlers:
- del _handlers[func]
+ global _ident
+ with _lock:
+ ident = _ident
+ _ident += 1
+ _handlers[ident] = (func, args, kwargs, vars(context))
+ return ident
+
+def unregister(ident):
+ """unregister(ident)
+
+ Remove the exit-handler identified by `ident` from the list of registered
+ handlers. If `ident` isn't registered this is a no-op.
+ """
+ if ident in _handlers:
+ del _handlers[ident]
def _run_handlers():
"""_run_handlers()
- Run registered exit handlers. The order is arbitrary.
+ Run registered exit-handlers. They run in the reverse order of which they
+ were registered.
If a handler raises an exception, it will be printed but nothing else
happens, i.e. other handlers will be run and `sys.excepthook` will not be
called for that reason.
"""
- for func, (args, kwargs) in _handlers.items():
+ context.clear()
+ for _ident, (func, args, kwargs, ctx) in \
+ sorted(_handlers.items(), reverse = True):
try:
- func(*args, **kwargs)
+ with context.local(**ctx):
+ func(*args, **kwargs)
except SystemExit:
pass
except:
|
{"golden_diff": "diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py\n--- a/pwnlib/atexit.py\n+++ b/pwnlib/atexit.py\n@@ -8,58 +8,76 @@\n printed twice when the standard :mod:`atexit` is used.\n \"\"\"\n \n-import sys, traceback\n+import sys, traceback, threading\n+from .context import context\n \n __all__ = ['register', 'unregister']\n \n+_lock = threading.Lock()\n+_ident = 0\n _handlers = {}\n \n def register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n \n- Registers a function to be called when an unhandled exception occurs. The\n- function will be called with positional arguments `args` and keyword\n- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n+ Registers a function to be called on program termination. The function will\n+ be called with positional arguments `args` and keyword arguments `kwargs`,\n+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n+ be the one used when the handler is run.\n \n- If `func` is already registered then `args` and `kwargs` will be updated.\n+ E.g. to suppress logging output from an exit-handler one could write::\n \n- This function can be used as a decorator::\n+ with context.local(log_level = 'error'):\n+ atexit.register(handler)\n \n- def f():\n- ...\n- atexit.register(f)\n+ An identifier is returned which can be used to unregister the exit-handler.\n \n- is equivalent to this::\n+ This function can be used as a decorator::\n \n @atexit.register\n- def f():\n+ def handler():\n ...\n \n- \"\"\"\n- _handlers[func] = (args, kwargs)\n- return func\n+ Notice however that this will bind ``handler`` to the identifier and not the\n+ actual exit-handler. The exit-handler can then be unregistered with::\n+\n+ atexit.unregister(handler)\n \n-def unregister(func):\n- \"\"\"unregister(func)\n+ This function is thread safe.\n \n- Remove `func` from the collection of registered functions. If `func` isn't\n- registered this is a no-op.\n \"\"\"\n- if func in _handlers:\n- del _handlers[func]\n+ global _ident\n+ with _lock:\n+ ident = _ident\n+ _ident += 1\n+ _handlers[ident] = (func, args, kwargs, vars(context))\n+ return ident\n+\n+def unregister(ident):\n+ \"\"\"unregister(ident)\n+\n+ Remove the exit-handler identified by `ident` from the list of registered\n+ handlers. If `ident` isn't registered this is a no-op.\n+ \"\"\"\n+ if ident in _handlers:\n+ del _handlers[ident]\n \n def _run_handlers():\n \"\"\"_run_handlers()\n \n- Run registered exit handlers. The order is arbitrary.\n+ Run registered exit-handlers. They run in the reverse order of which they\n+ were registered.\n \n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n- for func, (args, kwargs) in _handlers.items():\n+ context.clear()\n+ for _ident, (func, args, kwargs, ctx) in \\\n+ sorted(_handlers.items(), reverse = True):\n try:\n- func(*args, **kwargs)\n+ with context.local(**ctx):\n+ func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n", "issue": "atexit module can't handle multiple instances\nThe `atexit` module uses a dictionary to keep track of the routines to call. 
This means that no routines can be registered more than once, for example with different arguments.\n\nAdditionally, this means that `pwnlib.atexit` does not guarantee order.\n\nPer the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:\n\n_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._\n\nHowever, multiple registration of routines breaks `unregister` in its current incarnation. The suggested remedy for this is to return a unique ID for each `register` call that is passed to `unregister`, rather than passing in the function to `unregister`.\n\n", "before_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\n\nimport sys, traceback\n\n__all__ = ['register', 'unregister']\n\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called when an unhandled exception occurs. The\n function will be called with positional arguments `args` and keyword\n arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n\n If `func` is already registered then `args` and `kwargs` will be updated.\n\n This function can be used as a decorator::\n\n def f():\n ...\n atexit.register(f)\n\n is equivalent to this::\n\n @atexit.register\n def f():\n ...\n\n \"\"\"\n _handlers[func] = (args, kwargs)\n return func\n\ndef unregister(func):\n \"\"\"unregister(func)\n\n Remove `func` from the collection of registered functions. If `func` isn't\n registered this is a no-op.\n \"\"\"\n if func in _handlers:\n del _handlers[func]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit handlers. The order is arbitrary.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n for func, (args, kwargs) in _handlers.items():\n try:\n func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}]}
| 1,367 | 835 |
gh_patches_debug_27177
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-2177
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`context` gives wrong jump target for `jalr ra, ra, -0x16` in RISC-V
The jump target shown as annotation in the disassembly window is wrong:
```c++
pwndbg> si
pwndbg> set emulate on
pwndbg> context
► 0x1008e <_start+4> jalr ra, ra, -0x16 <main+8>
pwndbg> si
pwndbg> context
► 0x10074 <main> addi a7, zero, 0x40
```
Interestingly, `nearpc` shows the correct jump target:
```c++
► 0x1008e <_start+4> jalr ra, ra, -0x16 <main>
```
A workaround is `set emulate off`.
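To make the off-by-eight concrete, here is the address arithmetic as a small sketch (a reading of the symptom, not a claim about where pwndbg computes it; the `ra_before` value is implied by the correct target and by the `auipc ra, 0` that the `call` pseudo-instruction expands to):

```python
imm = -0x16                     # immediate of the jalr at 0x1008e

ra_before = 0x1008a             # ra before the jump, written by the preceding auipc ra, 0
print(hex((ra_before + imm) & ~1))  # 0x10074 -> <main>, the target actually reached

ra_after = 0x1008e + 4          # link value the jalr itself writes into ra (pc + 4)
print(hex((ra_after + imm) & ~1))   # 0x1007c -> <main+8>, the address shown in the annotation
```

So the `<main+8>` annotation is exactly what you get if the source register is read after the instruction has been emulated, i.e. after `ra` has already been overwritten with the link address.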
### test binary
[Download test binary](https://github.com/pwndbg/pwndbg/files/15366965/riscv-emu-bug.bin.txt). I created it from:
```asm
# riscv32-unknown-linux-gnu-as -march=rv32imac2p0 -o riscv-emu-bug.o riscv-emu-bug.s
# riscv32-unknown-linux-gnu-ld --no-relax -o riscv-emu-bug riscv-emu-bug.o
.section .rodata
greeting: .asciz "Hello world\n"
.equ greetlen, . - greeting
.section .text
main:
li a7, 64 # write
li a0, 1
la a1, greeting
li a2, greetlen
ecall
ret
.global _start
_start:
call main
li a7, 93 # exit
li a0, 0
ecall
```
### version information
pwndbg version: a1ddb3c0cd5e480710aa48f04ac560d659731bd8 (my fork)
```
Platform: Linux-6.9.1-arch1-1-x86_64-with-glibc2.39
OS: Arch Linux
OS ABI: #1 SMP PREEMPT_DYNAMIC Fri, 17 May 2024 16:56:38 +0000
Architecture: x86_64
Endian: little
Charset: utf-8
Width: 119
Height: 58
Gdb: 14.2
Python: 3.12.3 (main, Apr 23 2024, 09:16:07) [GCC 13.2.1 20240417]
Pwndbg: 2024.02.14 build: a1ddb3c0
Capstone: 5.0.1280
Unicorn: 2.0.1
This GDB was configured as follows:
configure --host=x86_64-pc-linux-gnu --target=x86_64-pc-linux-gnu
--with-auto-load-dir=$debugdir:$datadir/auto-load
--with-auto-load-safe-path=$debugdir:$datadir/auto-load
--with-expat
--with-gdb-datadir=/build/share/gdb (relocatable)
--with-jit-reader-dir=/build/lib/gdb (relocatable)
--without-libunwind-ia64
--with-lzma
--without-babeltrace
--without-intel-pt
--with-xxhash
--with-python=/usr
--with-python-libdir=/usr/lib
--with-debuginfod
--with-curses
--with-guile
--without-amd-dbgapi
--enable-source-highlight
--enable-threading
--enable-tui
--with-system-readline
--with-separate-debug-dir=/build/lib/debug (relocatable)
--with-system-gdbinit=/etc/gdb/gdbinit
```
</issue>
<code>
[start of pwndbg/disasm/riscv.py]
1 from __future__ import annotations
2
3 from capstone import * # noqa: F403
4 from capstone.riscv import * # noqa: F403
5
6 import pwndbg.disasm.arch
7 import pwndbg.gdblib.arch
8 import pwndbg.gdblib.regs
9 from pwndbg.disasm.instruction import InstructionCondition
10 from pwndbg.disasm.instruction import PwndbgInstruction
11 from pwndbg.emu.emulator import Emulator
12
13
14 class DisassemblyAssistant(pwndbg.disasm.arch.DisassemblyAssistant):
15 def __init__(self, architecture) -> None:
16 super().__init__(architecture)
17 self.architecture = architecture
18
19 def _is_condition_taken(
20 self, instruction: PwndbgInstruction, emu: Emulator | None
21 ) -> InstructionCondition:
22 # B-type instructions have two source registers that are compared
23 src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
24 # compressed instructions c.beqz and c.bnez only use one register operand.
25 if instruction.op_count(CS_OP_REG) > 1:
26 src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)
27 else:
28 src2_unsigned = 0
29
30 if self.architecture == "rv32":
31 src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000) << 1)
32 src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000) << 1)
33 elif self.architecture == "rv64":
34 src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000_00000000) << 1)
35 src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000_00000000) << 1)
36 else:
37 raise NotImplementedError(f"architecture '{self.architecture}' not implemented")
38
39 condition = {
40 RISCV_INS_BEQ: src1_signed == src2_signed,
41 RISCV_INS_BNE: src1_signed != src2_signed,
42 RISCV_INS_BLT: src1_signed < src2_signed,
43 RISCV_INS_BGE: src1_signed >= src2_signed,
44 RISCV_INS_BLTU: src1_unsigned < src2_unsigned,
45 RISCV_INS_BGEU: src1_unsigned >= src2_unsigned,
46 RISCV_INS_C_BEQZ: src1_signed == 0,
47 RISCV_INS_C_BNEZ: src1_signed != 0,
48 }.get(instruction.id, None)
49
50 if condition is None:
51 return InstructionCondition.UNDETERMINED
52
53 return InstructionCondition.TRUE if bool(condition) else InstructionCondition.FALSE
54
55 def condition(self, instruction: PwndbgInstruction, emu: Emulator) -> InstructionCondition:
56 """Checks if the current instruction is a jump that is taken.
57 Returns None if the instruction is executed unconditionally,
58 True if the instruction is executed for sure, False otherwise.
59 """
60 # JAL / JALR is unconditional
61 if RISCV_GRP_CALL in instruction.groups:
62 return InstructionCondition.UNDETERMINED
63
64 # We can't reason about anything except the current instruction
65 # as the comparison result is dependent on the register state.
66 if instruction.address != pwndbg.gdblib.regs.pc:
67 return InstructionCondition.UNDETERMINED
68
69 # Determine if the conditional jump is taken
70 if RISCV_GRP_BRANCH_RELATIVE in instruction.groups:
71 return self._is_condition_taken(instruction, emu)
72
73 return InstructionCondition.UNDETERMINED
74
75 def resolve_target(self, instruction: PwndbgInstruction, emu: Emulator | None, call=False):
76 """Return the address of the jump / conditional jump,
77 None if the next address is not dependent on instruction.
78 """
79 ptrmask = pwndbg.gdblib.arch.ptrmask
80 # JAL is unconditional and independent of current register status
81 if instruction.id in [RISCV_INS_JAL, RISCV_INS_C_JAL]:
82 # But that doesn't apply to ARM anyways :)
83 return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask
84
85 # We can't reason about anything except the current instruction
86 # as the comparison result is dependent on the register state.
87 if instruction.address != pwndbg.gdblib.regs.pc:
88 return None
89
90 # Determine if the conditional jump is taken
91 if RISCV_GRP_BRANCH_RELATIVE in instruction.groups and self._is_condition_taken(
92 instruction, emu
93 ):
94 return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask
95
96 # Determine the target address of the indirect jump
97 if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:
98 target = (
99 self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
100 + instruction.op_find(CS_OP_IMM, 1).imm
101 ) & ptrmask
102 # Clear the lowest bit without knowing the register width
103 return target ^ (target & 1)
104
105 return super().resolve_target(instruction, emu, call)
106
107
108 assistant_rv32 = DisassemblyAssistant("rv32")
109 assistant_rv64 = DisassemblyAssistant("rv64")
110
[end of pwndbg/disasm/riscv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/disasm/riscv.py b/pwndbg/disasm/riscv.py
--- a/pwndbg/disasm/riscv.py
+++ b/pwndbg/disasm/riscv.py
@@ -20,10 +20,10 @@
self, instruction: PwndbgInstruction, emu: Emulator | None
) -> InstructionCondition:
# B-type instructions have two source registers that are compared
- src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
+ src1_unsigned = instruction.op_find(CS_OP_REG, 1).before_value
# compressed instructions c.beqz and c.bnez only use one register operand.
if instruction.op_count(CS_OP_REG) > 1:
- src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)
+ src2_unsigned = instruction.op_find(CS_OP_REG, 2).before_value
else:
src2_unsigned = 0
@@ -96,7 +96,7 @@
# Determine the target address of the indirect jump
if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:
target = (
- self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)
+ instruction.op_find(CS_OP_REG, 1).before_value
+ instruction.op_find(CS_OP_IMM, 1).imm
) & ptrmask
# Clear the lowest bit without knowing the register width
|
{"golden_diff": "diff --git a/pwndbg/disasm/riscv.py b/pwndbg/disasm/riscv.py\n--- a/pwndbg/disasm/riscv.py\n+++ b/pwndbg/disasm/riscv.py\n@@ -20,10 +20,10 @@\n self, instruction: PwndbgInstruction, emu: Emulator | None\n ) -> InstructionCondition:\n # B-type instructions have two source registers that are compared\n- src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n+ src1_unsigned = instruction.op_find(CS_OP_REG, 1).before_value\n # compressed instructions c.beqz and c.bnez only use one register operand.\n if instruction.op_count(CS_OP_REG) > 1:\n- src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)\n+ src2_unsigned = instruction.op_find(CS_OP_REG, 2).before_value\n else:\n src2_unsigned = 0\n \n@@ -96,7 +96,7 @@\n # Determine the target address of the indirect jump\n if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:\n target = (\n- self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n+ instruction.op_find(CS_OP_REG, 1).before_value\n + instruction.op_find(CS_OP_IMM, 1).imm\n ) & ptrmask\n # Clear the lowest bit without knowing the register width\n", "issue": "`context` gives wrong jump target for `jalr ra, ra, -0x16` in RISC-V\nThe jump target shown as annotation in the disassembly window is wrong:\r\n\r\n```c++\r\npwndbg> si\r\npwndbg> set emulate on\r\npwndbg> context\r\n \u25ba 0x1008e <_start+4> jalr ra, ra, -0x16 <main+8>\r\npwndbg> si\r\npwndbg> context\r\n \u25ba 0x10074 <main> addi a7, zero, 0x40\r\n```\r\n\r\nInterestingly, `nearpc` shows the correct jump target:\r\n```c++\r\n \u25ba 0x1008e <_start+4> jalr ra, ra, -0x16 <main>\r\n ```\r\nA workaround is `set emulate off`.\r\n\r\n\r\n### test binary\r\n[Download test binary](https://github.com/pwndbg/pwndbg/files/15366965/riscv-emu-bug.bin.txt). I created it from:\r\n\r\n```asm\r\n# riscv32-unknown-linux-gnu-as -march=rv32imac2p0 -o riscv-emu-bug.o riscv-emu-bug.s\r\n# riscv32-unknown-linux-gnu-ld --no-relax -o riscv-emu-bug riscv-emu-bug.o\r\n\r\n.section .rodata\r\n\r\ngreeting: .asciz \"Hello world\\n\"\r\n.equ greetlen, . 
- greeting\r\n\r\n.section .text\r\nmain:\r\n li a7, 64 # write\r\n li a0, 1\r\n la a1, greeting\r\n li a2, greetlen\r\n ecall\r\n ret\r\n\r\n.global _start\r\n_start:\r\n call main\r\n li a7, 93 # exit\r\n li a0, 0\r\n ecall\r\n```\r\n\r\n\r\n### version information\r\n\r\npwndbg version: a1ddb3c0cd5e480710aa48f04ac560d659731bd8 (my fork)\r\n```\r\nPlatform: Linux-6.9.1-arch1-1-x86_64-with-glibc2.39\r\nOS: Arch Linux\r\nOS ABI: #1 SMP PREEMPT_DYNAMIC Fri, 17 May 2024 16:56:38 +0000\r\nArchitecture: x86_64\r\nEndian: little\r\nCharset: utf-8\r\nWidth: 119\r\nHeight: 58\r\nGdb: 14.2\r\nPython: 3.12.3 (main, Apr 23 2024, 09:16:07) [GCC 13.2.1 20240417]\r\nPwndbg: 2024.02.14 build: a1ddb3c0\r\nCapstone: 5.0.1280\r\nUnicorn: 2.0.1\r\nThis GDB was configured as follows:\r\n configure --host=x86_64-pc-linux-gnu --target=x86_64-pc-linux-gnu\r\n\t --with-auto-load-dir=$debugdir:$datadir/auto-load\r\n\t --with-auto-load-safe-path=$debugdir:$datadir/auto-load\r\n\t --with-expat\r\n\t --with-gdb-datadir=/build/share/gdb (relocatable)\r\n\t --with-jit-reader-dir=/build/lib/gdb (relocatable)\r\n\t --without-libunwind-ia64\r\n\t --with-lzma\r\n\t --without-babeltrace\r\n\t --without-intel-pt\r\n\t --with-xxhash\r\n\t --with-python=/usr\r\n\t --with-python-libdir=/usr/lib\r\n\t --with-debuginfod\r\n\t --with-curses\r\n\t --with-guile\r\n\t --without-amd-dbgapi\r\n\t --enable-source-highlight\r\n\t --enable-threading\r\n\t --enable-tui\r\n\t --with-system-readline\r\n\t --with-separate-debug-dir=/build/lib/debug (relocatable)\r\n\t --with-system-gdbinit=/etc/gdb/gdbinit\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom capstone import * # noqa: F403\nfrom capstone.riscv import * # noqa: F403\n\nimport pwndbg.disasm.arch\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.regs\nfrom pwndbg.disasm.instruction import InstructionCondition\nfrom pwndbg.disasm.instruction import PwndbgInstruction\nfrom pwndbg.emu.emulator import Emulator\n\n\nclass DisassemblyAssistant(pwndbg.disasm.arch.DisassemblyAssistant):\n def __init__(self, architecture) -> None:\n super().__init__(architecture)\n self.architecture = architecture\n\n def _is_condition_taken(\n self, instruction: PwndbgInstruction, emu: Emulator | None\n ) -> InstructionCondition:\n # B-type instructions have two source registers that are compared\n src1_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n # compressed instructions c.beqz and c.bnez only use one register operand.\n if instruction.op_count(CS_OP_REG) > 1:\n src2_unsigned = self.parse_register(instruction, instruction.op_find(CS_OP_REG, 2), emu)\n else:\n src2_unsigned = 0\n\n if self.architecture == \"rv32\":\n src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000) << 1)\n src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000) << 1)\n elif self.architecture == \"rv64\":\n src1_signed = src1_unsigned - ((src1_unsigned & 0x80000000_00000000) << 1)\n src2_signed = src2_unsigned - ((src2_unsigned & 0x80000000_00000000) << 1)\n else:\n raise NotImplementedError(f\"architecture '{self.architecture}' not implemented\")\n\n condition = {\n RISCV_INS_BEQ: src1_signed == src2_signed,\n RISCV_INS_BNE: src1_signed != src2_signed,\n RISCV_INS_BLT: src1_signed < src2_signed,\n RISCV_INS_BGE: src1_signed >= src2_signed,\n RISCV_INS_BLTU: src1_unsigned < src2_unsigned,\n RISCV_INS_BGEU: src1_unsigned >= src2_unsigned,\n RISCV_INS_C_BEQZ: src1_signed == 0,\n RISCV_INS_C_BNEZ: src1_signed != 0,\n }.get(instruction.id, None)\n\n if 
condition is None:\n return InstructionCondition.UNDETERMINED\n\n return InstructionCondition.TRUE if bool(condition) else InstructionCondition.FALSE\n\n def condition(self, instruction: PwndbgInstruction, emu: Emulator) -> InstructionCondition:\n \"\"\"Checks if the current instruction is a jump that is taken.\n Returns None if the instruction is executed unconditionally,\n True if the instruction is executed for sure, False otherwise.\n \"\"\"\n # JAL / JALR is unconditional\n if RISCV_GRP_CALL in instruction.groups:\n return InstructionCondition.UNDETERMINED\n\n # We can't reason about anything except the current instruction\n # as the comparison result is dependent on the register state.\n if instruction.address != pwndbg.gdblib.regs.pc:\n return InstructionCondition.UNDETERMINED\n\n # Determine if the conditional jump is taken\n if RISCV_GRP_BRANCH_RELATIVE in instruction.groups:\n return self._is_condition_taken(instruction, emu)\n\n return InstructionCondition.UNDETERMINED\n\n def resolve_target(self, instruction: PwndbgInstruction, emu: Emulator | None, call=False):\n \"\"\"Return the address of the jump / conditional jump,\n None if the next address is not dependent on instruction.\n \"\"\"\n ptrmask = pwndbg.gdblib.arch.ptrmask\n # JAL is unconditional and independent of current register status\n if instruction.id in [RISCV_INS_JAL, RISCV_INS_C_JAL]:\n # But that doesn't apply to ARM anyways :)\n return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask\n\n # We can't reason about anything except the current instruction\n # as the comparison result is dependent on the register state.\n if instruction.address != pwndbg.gdblib.regs.pc:\n return None\n\n # Determine if the conditional jump is taken\n if RISCV_GRP_BRANCH_RELATIVE in instruction.groups and self._is_condition_taken(\n instruction, emu\n ):\n return (instruction.address + instruction.op_find(CS_OP_IMM, 1).imm) & ptrmask\n\n # Determine the target address of the indirect jump\n if instruction.id in [RISCV_INS_JALR, RISCV_INS_C_JALR]:\n target = (\n self.parse_register(instruction, instruction.op_find(CS_OP_REG, 1), emu)\n + instruction.op_find(CS_OP_IMM, 1).imm\n ) & ptrmask\n # Clear the lowest bit without knowing the register width\n return target ^ (target & 1)\n\n return super().resolve_target(instruction, emu, call)\n\n\nassistant_rv32 = DisassemblyAssistant(\"rv32\")\nassistant_rv64 = DisassemblyAssistant(\"rv64\")\n", "path": "pwndbg/disasm/riscv.py"}]}
| 2,928 | 352 |
gh_patches_debug_25215
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-1971
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate Python 3.6 support
## 🚀 Feature
Deprecate python 3.6 -- support Python >= 3.7
______________________________________________________________________
#### Consider also to contribute to Kornia universe projects :)
<sub>
- [**Tutorials**](https://github.com/kornia/tutorials): our repository containing the tutorials.
</sub>
</issue>
<code>
[start of kornia/x/utils.py]
1 # TODO: remove the type: ignore in below after deprecating python 3.6
2 from dataclasses import dataclass, field # type: ignore
3 from enum import Enum
4 from typing import Dict
5
6 import torch.nn as nn
7
8 from kornia.metrics.average_meter import AverageMeter
9
10 # import yaml # type: ignore
11
12
13 class TrainerState(Enum):
14 STARTING = 0
15 TRAINING = 1
16 VALIDATE = 2
17 TERMINATE = 3
18
19
20 # NOTE: this class needs to be redefined according to the needed parameters.
21 @dataclass
22 class Configuration:
23 data_path: str = field(default="./", metadata={"help": "The input data directory."})
24 batch_size: int = field(default=1, metadata={"help": "The number of batches for the training dataloader."})
25 num_epochs: int = field(default=1, metadata={"help": "The number of epochs to run the training."})
26 lr: float = field(default=1e-3, metadata={"help": "The learning rate to be used for the optimize."})
27 output_path: str = field(default="./output", metadata={"help": "The output data directory."})
28 image_size: tuple = field(default=(224, 224), metadata={"help": "The input image size."})
29
30 # TODO: possibly remove because hydra already do this
31 # def __init__(self, **entries):
32 # for k, v in entries.items():
33 # self.__dict__[k] = Configuration(**v) if isinstance(v, dict) else v
34
35 # @classmethod
36 # def from_yaml(cls, config_file: str):
37 # """Create an instance of the configuration from a yaml file."""
38 # with open(config_file) as f:
39 # data = yaml.safe_load(f)
40 # return cls(**data)
41
42
43 class Lambda(nn.Module):
44 """Module to create a lambda function as nn.Module.
45
46 Args:
47 fcn: a pointer to any function.
48
49 Example:
50 >>> import torch
51 >>> import kornia as K
52 >>> fcn = Lambda(lambda x: K.geometry.resize(x, (32, 16)))
53 >>> fcn(torch.rand(1, 4, 64, 32)).shape
54 torch.Size([1, 4, 32, 16])
55 """
56
57 def __init__(self, fcn):
58 super().__init__()
59 self.fcn = fcn
60
61 def forward(self, x):
62 return self.fcn(x)
63
64
65 class StatsTracker:
66 """Stats tracker for computing metrics on the fly."""
67
68 def __init__(self) -> None:
69 self._stats: Dict[str, AverageMeter] = {}
70
71 @property
72 def stats(self):
73 return self._stats
74
75 def update(self, key: str, val: float, batch_size: int) -> None:
76 """Update the stats by the key value pair."""
77 if key not in self._stats:
78 self._stats[key] = AverageMeter()
79 self._stats[key].update(val, batch_size)
80
81 def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:
82 """Update the stats by the dict."""
83 for k, v in dic.items():
84 self.update(k, v, batch_size)
85
86 def __repr__(self) -> str:
87 return " ".join([f"{k.upper()}: {v.val:.2f} {v.val:.2f} " for k, v in self._stats.items()])
88
89 def as_dict(self) -> Dict[str, AverageMeter]:
90 """Return the dict format."""
91 return self._stats
92
[end of kornia/x/utils.py]
[start of setup.py]
1 # Welcome to the Kornia setup.py.
2 import sys
3
4 # Make sure that kornia is running on Python 3.6.0 or later
5 # (to avoid running into this bug: https://bugs.python.org/issue29246)
6
7 if sys.version_info < (3, 6, 0):
8 raise RuntimeError("Kornia requires Python 3.6.0 or later.")
9
10
11 from setuptools import setup
12
13 setup()
14
[end of setup.py]
[start of kornia/utils/_compat.py]
1 from typing import TYPE_CHECKING, List, Optional, Tuple
2
3 import torch
4 from torch import Tensor
5
6 from packaging import version
7
8
9 def torch_version() -> str:
10 """Parse the `torch.__version__` variable and removes +cu*/cpu."""
11 return torch.__version__.split('+')[0]
12
13
14 # TODO: replace by torch_version_ge``
15 def torch_version_geq(major, minor) -> bool:
16 _version = version.parse(torch_version())
17 return _version >= version.parse(f"{major}.{minor}")
18
19
20 def torch_version_lt(major: int, minor: int, patch: int) -> bool:
21 _version = version.parse(torch_version())
22 return _version < version.parse(f"{major}.{minor}.{patch}")
23
24
25 def torch_version_le(major: int, minor: int, patch: int) -> bool:
26 _version = version.parse(torch_version())
27 return _version <= version.parse(f"{major}.{minor}.{patch}")
28
29
30 def torch_version_ge(major: int, minor: int, patch: int) -> bool:
31 _version = version.parse(torch_version())
32 return _version >= version.parse(f"{major}.{minor}.{patch}")
33
34
35 if version.parse(torch_version()) > version.parse("1.7.1"):
36 # TODO: remove the type: ignore once Python 3.6 is deprecated.
37 # It turns out that Pytorch has no attribute `torch.linalg` for
38 # Python 3.6 / PyTorch 1.7.0, 1.7.1
39 from torch.linalg import qr as linalg_qr # type: ignore
40 else:
41 from torch import qr as linalg_qr # type: ignore # noqa: F401
42
43
44 if torch_version_ge(1, 10, 0):
45
46 if not TYPE_CHECKING:
47
48 def torch_meshgrid(tensors: List[Tensor], indexing: str):
49 return torch.meshgrid(tensors, indexing=indexing)
50
51 else:
52
53 if TYPE_CHECKING:
54
55 def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:
56 return torch.meshgrid(tensors)
57
58 else:
59
60 def torch_meshgrid(tensors: List[Tensor], indexing: str):
61 return torch.meshgrid(tensors)
62
63
64 if torch_version_ge(1, 10, 0):
65 torch_inference_mode = torch.inference_mode
66 else:
67 torch_inference_mode = torch.no_grad
68
[end of kornia/utils/_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py
--- a/kornia/utils/_compat.py
+++ b/kornia/utils/_compat.py
@@ -33,12 +33,9 @@
if version.parse(torch_version()) > version.parse("1.7.1"):
- # TODO: remove the type: ignore once Python 3.6 is deprecated.
- # It turns out that Pytorch has no attribute `torch.linalg` for
- # Python 3.6 / PyTorch 1.7.0, 1.7.1
- from torch.linalg import qr as linalg_qr # type: ignore
+ from torch.linalg import qr as linalg_qr
else:
- from torch import qr as linalg_qr # type: ignore # noqa: F401
+ from torch import qr as linalg_qr # noqa: F401
if torch_version_ge(1, 10, 0):
diff --git a/kornia/x/utils.py b/kornia/x/utils.py
--- a/kornia/x/utils.py
+++ b/kornia/x/utils.py
@@ -1,5 +1,4 @@
-# TODO: remove the type: ignore in below after deprecating python 3.6
-from dataclasses import dataclass, field # type: ignore
+from dataclasses import dataclass, field
from enum import Enum
from typing import Dict
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,11 +1,11 @@
# Welcome to the Kornia setup.py.
import sys
-# Make sure that kornia is running on Python 3.6.0 or later
+# Make sure that kornia is running on Python 3.7.0 or later
# (to avoid running into this bug: https://bugs.python.org/issue29246)
-if sys.version_info < (3, 6, 0):
- raise RuntimeError("Kornia requires Python 3.6.0 or later.")
+if sys.version_info < (3, 7, 0):
+ raise RuntimeError("Kornia requires Python 3.7.0 or later.")
from setuptools import setup
|
{"golden_diff": "diff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py\n--- a/kornia/utils/_compat.py\n+++ b/kornia/utils/_compat.py\n@@ -33,12 +33,9 @@\n \n \n if version.parse(torch_version()) > version.parse(\"1.7.1\"):\n- # TODO: remove the type: ignore once Python 3.6 is deprecated.\n- # It turns out that Pytorch has no attribute `torch.linalg` for\n- # Python 3.6 / PyTorch 1.7.0, 1.7.1\n- from torch.linalg import qr as linalg_qr # type: ignore\n+ from torch.linalg import qr as linalg_qr\n else:\n- from torch import qr as linalg_qr # type: ignore # noqa: F401\n+ from torch import qr as linalg_qr # noqa: F401\n \n \n if torch_version_ge(1, 10, 0):\ndiff --git a/kornia/x/utils.py b/kornia/x/utils.py\n--- a/kornia/x/utils.py\n+++ b/kornia/x/utils.py\n@@ -1,5 +1,4 @@\n-# TODO: remove the type: ignore in below after deprecating python 3.6\n-from dataclasses import dataclass, field # type: ignore\n+from dataclasses import dataclass, field\n from enum import Enum\n from typing import Dict\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,11 +1,11 @@\n # Welcome to the Kornia setup.py.\n import sys\n \n-# Make sure that kornia is running on Python 3.6.0 or later\n+# Make sure that kornia is running on Python 3.7.0 or later\n # (to avoid running into this bug: https://bugs.python.org/issue29246)\n \n-if sys.version_info < (3, 6, 0):\n- raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n+if sys.version_info < (3, 7, 0):\n+ raise RuntimeError(\"Kornia requires Python 3.7.0 or later.\")\n \n \n from setuptools import setup\n", "issue": "Deprecate Python 3.6 support\n## \ud83d\ude80 Feature\r\n\r\nDeprecate python 3.6 -- support Python >= 3.7\r\n\r\n______________________________________________________________________\r\n\r\n#### Consider also to contribute to Kornia universe projects :)\r\n\r\n<sub>\r\n\r\n- [**Tutorials**](https://github.com/kornia/tutorials): our repository containing the tutorials.\r\n\r\n</sub>\r\n\n", "before_files": [{"content": "# TODO: remove the type: ignore in below after deprecating python 3.6\nfrom dataclasses import dataclass, field # type: ignore\nfrom enum import Enum\nfrom typing import Dict\n\nimport torch.nn as nn\n\nfrom kornia.metrics.average_meter import AverageMeter\n\n# import yaml # type: ignore\n\n\nclass TrainerState(Enum):\n STARTING = 0\n TRAINING = 1\n VALIDATE = 2\n TERMINATE = 3\n\n\n# NOTE: this class needs to be redefined according to the needed parameters.\n@dataclass\nclass Configuration:\n data_path: str = field(default=\"./\", metadata={\"help\": \"The input data directory.\"})\n batch_size: int = field(default=1, metadata={\"help\": \"The number of batches for the training dataloader.\"})\n num_epochs: int = field(default=1, metadata={\"help\": \"The number of epochs to run the training.\"})\n lr: float = field(default=1e-3, metadata={\"help\": \"The learning rate to be used for the optimize.\"})\n output_path: str = field(default=\"./output\", metadata={\"help\": \"The output data directory.\"})\n image_size: tuple = field(default=(224, 224), metadata={\"help\": \"The input image size.\"})\n\n # TODO: possibly remove because hydra already do this\n # def __init__(self, **entries):\n # for k, v in entries.items():\n # self.__dict__[k] = Configuration(**v) if isinstance(v, dict) else v\n\n # @classmethod\n # def from_yaml(cls, config_file: str):\n # \"\"\"Create an instance of the configuration from a yaml file.\"\"\"\n # with open(config_file) as f:\n # data = yaml.safe_load(f)\n # return 
cls(**data)\n\n\nclass Lambda(nn.Module):\n \"\"\"Module to create a lambda function as nn.Module.\n\n Args:\n fcn: a pointer to any function.\n\n Example:\n >>> import torch\n >>> import kornia as K\n >>> fcn = Lambda(lambda x: K.geometry.resize(x, (32, 16)))\n >>> fcn(torch.rand(1, 4, 64, 32)).shape\n torch.Size([1, 4, 32, 16])\n \"\"\"\n\n def __init__(self, fcn):\n super().__init__()\n self.fcn = fcn\n\n def forward(self, x):\n return self.fcn(x)\n\n\nclass StatsTracker:\n \"\"\"Stats tracker for computing metrics on the fly.\"\"\"\n\n def __init__(self) -> None:\n self._stats: Dict[str, AverageMeter] = {}\n\n @property\n def stats(self):\n return self._stats\n\n def update(self, key: str, val: float, batch_size: int) -> None:\n \"\"\"Update the stats by the key value pair.\"\"\"\n if key not in self._stats:\n self._stats[key] = AverageMeter()\n self._stats[key].update(val, batch_size)\n\n def update_from_dict(self, dic: Dict[str, float], batch_size: int) -> None:\n \"\"\"Update the stats by the dict.\"\"\"\n for k, v in dic.items():\n self.update(k, v, batch_size)\n\n def __repr__(self) -> str:\n return \" \".join([f\"{k.upper()}: {v.val:.2f} {v.val:.2f} \" for k, v in self._stats.items()])\n\n def as_dict(self) -> Dict[str, AverageMeter]:\n \"\"\"Return the dict format.\"\"\"\n return self._stats\n", "path": "kornia/x/utils.py"}, {"content": "# Welcome to the Kornia setup.py.\nimport sys\n\n# Make sure that kornia is running on Python 3.6.0 or later\n# (to avoid running into this bug: https://bugs.python.org/issue29246)\n\nif sys.version_info < (3, 6, 0):\n raise RuntimeError(\"Kornia requires Python 3.6.0 or later.\")\n\n\nfrom setuptools import setup\n\nsetup()\n", "path": "setup.py"}, {"content": "from typing import TYPE_CHECKING, List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom packaging import version\n\n\ndef torch_version() -> str:\n \"\"\"Parse the `torch.__version__` variable and removes +cu*/cpu.\"\"\"\n return torch.__version__.split('+')[0]\n\n\n# TODO: replace by torch_version_ge``\ndef torch_version_geq(major, minor) -> bool:\n _version = version.parse(torch_version())\n return _version >= version.parse(f\"{major}.{minor}\")\n\n\ndef torch_version_lt(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version < version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_le(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version <= version.parse(f\"{major}.{minor}.{patch}\")\n\n\ndef torch_version_ge(major: int, minor: int, patch: int) -> bool:\n _version = version.parse(torch_version())\n return _version >= version.parse(f\"{major}.{minor}.{patch}\")\n\n\nif version.parse(torch_version()) > version.parse(\"1.7.1\"):\n # TODO: remove the type: ignore once Python 3.6 is deprecated.\n # It turns out that Pytorch has no attribute `torch.linalg` for\n # Python 3.6 / PyTorch 1.7.0, 1.7.1\n from torch.linalg import qr as linalg_qr # type: ignore\nelse:\n from torch import qr as linalg_qr # type: ignore # noqa: F401\n\n\nif torch_version_ge(1, 10, 0):\n\n if not TYPE_CHECKING:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return torch.meshgrid(tensors, indexing=indexing)\n\nelse:\n\n if TYPE_CHECKING:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: Optional[str] = None) -> Tuple[Tensor, ...]:\n return torch.meshgrid(tensors)\n\n else:\n\n def torch_meshgrid(tensors: List[Tensor], indexing: str):\n return 
torch.meshgrid(tensors)\n\n\nif torch_version_ge(1, 10, 0):\n torch_inference_mode = torch.inference_mode\nelse:\n torch_inference_mode = torch.no_grad\n", "path": "kornia/utils/_compat.py"}]}
| 2,416 | 508 |
gh_patches_debug_20120
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-2874
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Yahoo results for simplified Chinese search sometimes have the first character cut off
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
The latest commit as of this report, 75587d03a6766c87f2f1964c82080393559ed623
**How did you install SearXNG?**
```
make run
```
**What happened?**
As the title says. Current code redirects simplified search to the Hong Kong site, which actually uses traditional Chinese, but marks simplified pages with a `<span class="fz-s fc-obsidian ml-4">簡</span>` after the title text node, at the end:
```html
<h3 style="display:block;margin-top:24px;margin-bottom:2px;" class="title tc"><a style="line-height:26px" class=" d-ib fz-20 lh-26 td-hu tc va-bot mxw-100p" href="https://zh.wikipedia.org/wiki/%E7%B9%81%E4%BD%93%E5%AD%97" referrerpolicy="origin" target="_blank" data-439="6515fd318fbac" aria-label="繁体字 - 维基百科,自由的百科全书"><span class=" s-url d-ib p-abs t-0 l-0 fz-14 lh-20 fc-obsidian wr-bw ls-n pb-4">zh.wikipedia.org<span class=" fc-pewter"> › wiki › <b>繁体字</b></span></span>繁体字 - 维基百科,自由的百科全书<span class="fz-s fc-obsidian ml-4">簡</span></a></h3>
```
This character is picked up by the `.xpath('span')` call in the first line below, which, without knowledge of this situation, extracts text from both `span`s, so the substring taken on the second line is off by one:
https://github.com/searxng/searxng/blob/75587d03a6766c87f2f1964c82080393559ed623/searx/engines/yahoo.py#L139-L140
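The effect is easy to reproduce outside the engine with a stripped-down version of the markup (the `extract_text` below is only a stand-in for `searx.utils.extract_text`, not the real helper):

```python
from lxml import html

a = html.fromstring(
    '<a><span>zh.wikipedia.org › wiki › 繁体字</span>'
    '繁体字 - 维基百科,自由的百科全书'
    '<span>簡</span></a>'
)

def extract_text(nodes):
    # stand-in: concatenate the text content of an element or a list of elements
    if isinstance(nodes, list):
        return ''.join(n.text_content() for n in nodes)
    return nodes.text_content()

offset = len(extract_text(a.xpath('span')))     # counts BOTH spans -> one character too many
print(extract_text(a)[offset:])                 # '体字 - 维基百科,自由的百科全书簡'

offset = len(extract_text(a.xpath('span[1]')))  # only the URL breadcrumb span
right = -len(extract_text(a.xpath('span[2]'))) or None
print(extract_text(a)[offset:right])            # '繁体字 - 维基百科,自由的百科全书'
```

The `span[1]`/`span[2]` indexing mirrors the patch under "Additional context" below.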
**How To Reproduce**
Search for some simplified Chinese keyword and look for a Yahoo result with 簡 being the last character of its title.
**Expected behavior**
That mark should be ignored, eliminating the off by one.
**Screenshots & Logs**
<img width="664" alt="problematic search result" src="https://github.com/searxng/searxng/assets/4474501/f8ea1959-0487-4fed-b93f-c3e9a061eaea">
You can see that the last breadcrumb is 繁体字, but the title says 体字, cutting off 繁, while an extraneous 簡 appears at the end.
**Additional context**
```diff
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 0fdeacec2..4858e7e3f 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -136,8 +136,9 @@ def response(resp):
title = eval_xpath_getindex(result, './/h3/a', 0, default=None)
if title is None:
continue
- offset = len(extract_text(title.xpath('span')))
- title = extract_text(title)[offset:]
+ offset = len(extract_text(title.xpath('span[1]')))
+ offset_right = -len(extract_text(title.xpath('span[2]'))) or None
+ title = extract_text(title)[offset:offset_right]
content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
content = extract_text(content, allow_none=True)
```
This is an apparent fix, but I'm not sure it's worth it.
</issue>
<code>
[start of searx/engines/yahoo.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Yahoo Search (Web)
4
5 Languages are supported by mapping the language to a domain. If domain is not
6 found in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.
7
8 """
9
10 from urllib.parse import (
11 unquote,
12 urlencode,
13 )
14 from lxml import html
15
16 from searx.utils import (
17 eval_xpath_getindex,
18 eval_xpath_list,
19 extract_text,
20 )
21 from searx.enginelib.traits import EngineTraits
22
23 traits: EngineTraits
24
25 # about
26 about = {
27 "website": 'https://search.yahoo.com/',
28 "wikidata_id": None,
29 "official_api_documentation": 'https://developer.yahoo.com/api/',
30 "use_official_api": False,
31 "require_api_key": False,
32 "results": 'HTML',
33 }
34
35 # engine dependent config
36 categories = ['general', 'web']
37 paging = True
38 time_range_support = True
39 # send_accept_language_header = True
40
41 time_range_dict = {
42 'day': ('1d', 'd'),
43 'week': ('1w', 'w'),
44 'month': ('1m', 'm'),
45 }
46
47 lang2domain = {
48 'zh_chs': 'hk.search.yahoo.com',
49 'zh_cht': 'tw.search.yahoo.com',
50 'any': 'search.yahoo.com',
51 'en': 'search.yahoo.com',
52 'bg': 'search.yahoo.com',
53 'cs': 'search.yahoo.com',
54 'da': 'search.yahoo.com',
55 'el': 'search.yahoo.com',
56 'et': 'search.yahoo.com',
57 'he': 'search.yahoo.com',
58 'hr': 'search.yahoo.com',
59 'ja': 'search.yahoo.com',
60 'ko': 'search.yahoo.com',
61 'sk': 'search.yahoo.com',
62 'sl': 'search.yahoo.com',
63 }
64 """Map language to domain"""
65
66 locale_aliases = {
67 'zh': 'zh_Hans',
68 'zh-HK': 'zh_Hans',
69 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com
70 'zh-TW': 'zh_Hant',
71 }
72
73
74 def request(query, params):
75 """build request"""
76
77 lang = locale_aliases.get(params['language'], None)
78 if not lang:
79 lang = params['language'].split('-')[0]
80 lang = traits.get_language(lang, traits.all_locale)
81
82 offset = (params['pageno'] - 1) * 7 + 1
83 age, btf = time_range_dict.get(params['time_range'], ('', ''))
84
85 args = urlencode(
86 {
87 'p': query,
88 'ei': 'UTF-8',
89 'fl': 1,
90 'vl': 'lang_' + lang,
91 'btf': btf,
92 'fr2': 'time',
93 'age': age,
94 'b': offset,
95 'xargs': 0,
96 }
97 )
98
99 domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)
100 params['url'] = 'https://%s/search?%s' % (domain, args)
101 return params
102
103
104 def parse_url(url_string):
105 """remove yahoo-specific tracking-url"""
106
107 endings = ['/RS', '/RK']
108 endpositions = []
109 start = url_string.find('http', url_string.find('/RU=') + 1)
110
111 for ending in endings:
112 endpos = url_string.rfind(ending)
113 if endpos > -1:
114 endpositions.append(endpos)
115
116 if start == 0 or len(endpositions) == 0:
117 return url_string
118
119 end = min(endpositions)
120 return unquote(url_string[start:end])
121
122
123 def response(resp):
124 """parse response"""
125
126 results = []
127 dom = html.fromstring(resp.text)
128
129 # parse results
130 for result in eval_xpath_list(dom, '//div[contains(@class,"algo-sr")]'):
131 url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)
132 if url is None:
133 continue
134 url = parse_url(url)
135
136 title = eval_xpath_getindex(result, './/h3/a', 0, default=None)
137 if title is None:
138 continue
139 offset = len(extract_text(title.xpath('span')))
140 title = extract_text(title)[offset:]
141
142 content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
143 content = extract_text(content, allow_none=True)
144
145 # append result
146 results.append({'url': url, 'title': title, 'content': content})
147
148 for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]//table//a'):
149 # append suggestion
150 results.append({'suggestion': extract_text(suggestion)})
151
152 return results
153
154
155 def fetch_traits(engine_traits: EngineTraits):
156 """Fetch languages from yahoo"""
157
158 # pylint: disable=import-outside-toplevel
159 import babel
160 from searx import network
161 from searx.locales import language_tag
162
163 engine_traits.all_locale = 'any'
164
165 resp = network.get('https://search.yahoo.com/preferences/languages')
166 if not resp.ok:
167 print("ERROR: response from peertube is not OK.")
168
169 dom = html.fromstring(resp.text)
170 offset = len('lang_')
171
172 eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}
173
174 for val in eval_xpath_list(dom, '//div[contains(@class, "lang-item")]/input/@value'):
175 eng_tag = val[offset:]
176
177 try:
178 sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))
179 except babel.UnknownLocaleError:
180 print('ERROR: unknown language --> %s' % eng_tag)
181 continue
182
183 conflict = engine_traits.languages.get(sxng_tag)
184 if conflict:
185 if conflict != eng_tag:
186 print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
187 continue
188 engine_traits.languages[sxng_tag] = eng_tag
189
[end of searx/engines/yahoo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -133,12 +133,7 @@
continue
url = parse_url(url)
- title = eval_xpath_getindex(result, './/h3/a', 0, default=None)
- if title is None:
- continue
- offset = len(extract_text(title.xpath('span')))
- title = extract_text(title)[offset:]
-
+ title = extract_text(result.xpath('.//h3//a/@aria-label'))
content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
content = extract_text(content, allow_none=True)
@@ -164,7 +159,7 @@
resp = network.get('https://search.yahoo.com/preferences/languages')
if not resp.ok:
- print("ERROR: response from peertube is not OK.")
+ print("ERROR: response from yahoo is not OK.")
dom = html.fromstring(resp.text)
offset = len('lang_')
|
{"golden_diff": "diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py\n--- a/searx/engines/yahoo.py\n+++ b/searx/engines/yahoo.py\n@@ -133,12 +133,7 @@\n continue\n url = parse_url(url)\n \n- title = eval_xpath_getindex(result, './/h3/a', 0, default=None)\n- if title is None:\n- continue\n- offset = len(extract_text(title.xpath('span')))\n- title = extract_text(title)[offset:]\n-\n+ title = extract_text(result.xpath('.//h3//a/@aria-label'))\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n \n@@ -164,7 +159,7 @@\n \n resp = network.get('https://search.yahoo.com/preferences/languages')\n if not resp.ok:\n- print(\"ERROR: response from peertube is not OK.\")\n+ print(\"ERROR: response from yahoo is not OK.\")\n \n dom = html.fromstring(resp.text)\n offset = len('lang_')\n", "issue": "Bug: Yahoo results for simplified Chinese search sometimes have the first character cut off\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n\r\nThe latest commit as of this report, 75587d03a6766c87f2f1964c82080393559ed623\r\n\r\n**How did you install SearXNG?**\r\n\r\n```\r\nmake run\r\n```\r\n\r\n**What happened?**\r\n\r\nAs the title says. Current code redirects simplified search to the Hong Kong site, which actually uses traditional Chinese, but marks simplified pages with a `<span class=\"fz-s fc-obsidian ml-4\">\u7c21</span>` after the title text node, at the end:\r\n\r\n```html\r\n<h3 style=\"display:block;margin-top:24px;margin-bottom:2px;\" class=\"title tc\"><a style=\"line-height:26px\" class=\" d-ib fz-20 lh-26 td-hu tc va-bot mxw-100p\" href=\"https://zh.wikipedia.org/wiki/%E7%B9%81%E4%BD%93%E5%AD%97\" referrerpolicy=\"origin\" target=\"_blank\" data-439=\"6515fd318fbac\" aria-label=\"\u7e41\u4f53\u5b57 - \u7ef4\u57fa\u767e\u79d1\uff0c\u81ea\u7531\u7684\u767e\u79d1\u5168\u4e66\"><span class=\" s-url d-ib p-abs t-0 l-0 fz-14 lh-20 fc-obsidian wr-bw ls-n pb-4\">zh.wikipedia.org<span class=\" fc-pewter\"> \u203a wiki \u203a <b>\u7e41\u4f53\u5b57</b></span></span>\u7e41\u4f53\u5b57 - \u7ef4\u57fa\u767e\u79d1\uff0c\u81ea\u7531\u7684\u767e\u79d1\u5168\u4e66<span class=\"fz-s fc-obsidian ml-4\">\u7c21</span></a></h3>\r\n```\r\n\r\nThis character is extracted by `.xpath('span')` in the first line, which, without knowledge of this situation, actually extracted text from two `span`s, making the second line substring off by one:\r\n\r\nhttps://github.com/searxng/searxng/blob/75587d03a6766c87f2f1964c82080393559ed623/searx/engines/yahoo.py#L139-L140\r\n\r\n**How To Reproduce**\r\n\r\nSearch for some simplified Chinese keyword and look for a Yahoo result with \u7c21 being the last character of its title.\r\n\r\n**Expected behavior**\r\n\r\nThat mark should be ignored, eliminating the off by one.\r\n\r\n**Screenshots & Logs**\r\n\r\n<img width=\"664\" alt=\"problematic search result\" src=\"https://github.com/searxng/searxng/assets/4474501/f8ea1959-0487-4fed-b93f-c3e9a061eaea\">\r\n\r\nYou can see that the last breadcrumb is \u7e41\u4f53\u5b57, but the title says \u4f53\u5b57, cutting off \u7e41; while there is an extraneous \u7c21 at the end.\r\n\r\n**Additional context**\r\n\r\n```diff\r\ndiff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py\r\nindex 0fdeacec2..4858e7e3f 100644\r\n--- a/searx/engines/yahoo.py\r\n+++ b/searx/engines/yahoo.py\r\n@@ -136,8 
+136,9 @@ def response(resp):\r\n title = eval_xpath_getindex(result, './/h3/a', 0, default=None)\r\n if title is None:\r\n continue\r\n- offset = len(extract_text(title.xpath('span')))\r\n- title = extract_text(title)[offset:]\r\n+ offset = len(extract_text(title.xpath('span[1]')))\r\n+ offset_right = -len(extract_text(title.xpath('span[2]'))) or None\r\n+ title = extract_text(title)[offset:offset_right]\r\n\r\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\r\n content = extract_text(content, allow_none=True)\r\n```\r\n\r\nThis is an apparent fix, but I'm not sure it's worth it.\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Yahoo Search (Web)\n\nLanguages are supported by mapping the language to a domain. If domain is not\nfound in :py:obj:`lang2domain` URL ``<lang>.search.yahoo.com`` is used.\n\n\"\"\"\n\nfrom urllib.parse import (\n unquote,\n urlencode,\n)\nfrom lxml import html\n\nfrom searx.utils import (\n eval_xpath_getindex,\n eval_xpath_list,\n extract_text,\n)\nfrom searx.enginelib.traits import EngineTraits\n\ntraits: EngineTraits\n\n# about\nabout = {\n \"website\": 'https://search.yahoo.com/',\n \"wikidata_id\": None,\n \"official_api_documentation\": 'https://developer.yahoo.com/api/',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = True\n# send_accept_language_header = True\n\ntime_range_dict = {\n 'day': ('1d', 'd'),\n 'week': ('1w', 'w'),\n 'month': ('1m', 'm'),\n}\n\nlang2domain = {\n 'zh_chs': 'hk.search.yahoo.com',\n 'zh_cht': 'tw.search.yahoo.com',\n 'any': 'search.yahoo.com',\n 'en': 'search.yahoo.com',\n 'bg': 'search.yahoo.com',\n 'cs': 'search.yahoo.com',\n 'da': 'search.yahoo.com',\n 'el': 'search.yahoo.com',\n 'et': 'search.yahoo.com',\n 'he': 'search.yahoo.com',\n 'hr': 'search.yahoo.com',\n 'ja': 'search.yahoo.com',\n 'ko': 'search.yahoo.com',\n 'sk': 'search.yahoo.com',\n 'sl': 'search.yahoo.com',\n}\n\"\"\"Map language to domain\"\"\"\n\nlocale_aliases = {\n 'zh': 'zh_Hans',\n 'zh-HK': 'zh_Hans',\n 'zh-CN': 'zh_Hans', # dead since 2015 / routed to hk.search.yahoo.com\n 'zh-TW': 'zh_Hant',\n}\n\n\ndef request(query, params):\n \"\"\"build request\"\"\"\n\n lang = locale_aliases.get(params['language'], None)\n if not lang:\n lang = params['language'].split('-')[0]\n lang = traits.get_language(lang, traits.all_locale)\n\n offset = (params['pageno'] - 1) * 7 + 1\n age, btf = time_range_dict.get(params['time_range'], ('', ''))\n\n args = urlencode(\n {\n 'p': query,\n 'ei': 'UTF-8',\n 'fl': 1,\n 'vl': 'lang_' + lang,\n 'btf': btf,\n 'fr2': 'time',\n 'age': age,\n 'b': offset,\n 'xargs': 0,\n }\n )\n\n domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)\n params['url'] = 'https://%s/search?%s' % (domain, args)\n return params\n\n\ndef parse_url(url_string):\n \"\"\"remove yahoo-specific tracking-url\"\"\"\n\n endings = ['/RS', '/RK']\n endpositions = []\n start = url_string.find('http', url_string.find('/RU=') + 1)\n\n for ending in endings:\n endpos = url_string.rfind(ending)\n if endpos > -1:\n endpositions.append(endpos)\n\n if start == 0 or len(endpositions) == 0:\n return url_string\n\n end = min(endpositions)\n return unquote(url_string[start:end])\n\n\ndef response(resp):\n \"\"\"parse response\"\"\"\n\n results = []\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in eval_xpath_list(dom, 
'//div[contains(@class,\"algo-sr\")]'):\n url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)\n if url is None:\n continue\n url = parse_url(url)\n\n title = eval_xpath_getindex(result, './/h3/a', 0, default=None)\n if title is None:\n continue\n offset = len(extract_text(title.xpath('span')))\n title = extract_text(title)[offset:]\n\n content = eval_xpath_getindex(result, './/div[contains(@class, \"compText\")]', 0, default='')\n content = extract_text(content, allow_none=True)\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n\n for suggestion in eval_xpath_list(dom, '//div[contains(@class, \"AlsoTry\")]//table//a'):\n # append suggestion\n results.append({'suggestion': extract_text(suggestion)})\n\n return results\n\n\ndef fetch_traits(engine_traits: EngineTraits):\n \"\"\"Fetch languages from yahoo\"\"\"\n\n # pylint: disable=import-outside-toplevel\n import babel\n from searx import network\n from searx.locales import language_tag\n\n engine_traits.all_locale = 'any'\n\n resp = network.get('https://search.yahoo.com/preferences/languages')\n if not resp.ok:\n print(\"ERROR: response from peertube is not OK.\")\n\n dom = html.fromstring(resp.text)\n offset = len('lang_')\n\n eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}\n\n for val in eval_xpath_list(dom, '//div[contains(@class, \"lang-item\")]/input/@value'):\n eng_tag = val[offset:]\n\n try:\n sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))\n except babel.UnknownLocaleError:\n print('ERROR: unknown language --> %s' % eng_tag)\n continue\n\n conflict = engine_traits.languages.get(sxng_tag)\n if conflict:\n if conflict != eng_tag:\n print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, eng_tag))\n continue\n engine_traits.languages[sxng_tag] = eng_tag\n", "path": "searx/engines/yahoo.py"}]}
| 3,360 | 269 |
gh_patches_debug_8954
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-1028
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
complete `.zenodo.json` metadata
Our `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):
https://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4
I feel like adding the contributors that are currently not listed to that file in a PR, and making that a "custom" (to add new contributors also to the zenodo metadata). I think that's a nice "reward" for contributions. Any objections?
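For anyone picking this up, a minimal sketch of what an added entry could look like, assuming the file follows Zenodo's usual deposit-metadata schema (names and values below are placeholders, not actual contributors):
```json
{
  "creators": [
    {
      "name": "Lastname, Firstname",
      "affiliation": "Some University",
      "orcid": "0000-0000-0000-0000"
    }
  ]
}
```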
</issue>
<code>
[start of setup.py]
1 """Setup MNE-BIDS."""
2 import sys
3
4 from setuptools import setup
5
6 # Give setuptools a hint to complain if it's too old a version
7 SETUP_REQUIRES = ["setuptools >= 46.4.0"]
8 # This enables setuptools to install wheel on-the-fly
9 SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
10
11 version = None
12 with open('mne_bids/__init__.py', 'r') as fid:
13 for line in fid:
14 line = line.strip()
15 if line.startswith('__version__ = '):
16 version = line.split(' = ')[1].split('#')[0].strip('\'')
17 break
18 if version is None:
19 raise RuntimeError('Could not determine version')
20
21
22 if __name__ == "__main__":
23 setup(
24 version=version,
25 setup_requires=SETUP_REQUIRES,
26 )
27
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
"""Setup MNE-BIDS."""
+import os
import sys
from setuptools import setup
@@ -9,7 +10,7 @@
SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
version = None
-with open('mne_bids/__init__.py', 'r') as fid:
+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
for line in fid:
line = line.strip()
if line.startswith('__version__ = '):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n \"\"\"Setup MNE-BIDS.\"\"\"\n+import os\n import sys\n \n from setuptools import setup\n@@ -9,7 +10,7 @@\n SETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n \n version = None\n-with open('mne_bids/__init__.py', 'r') as fid:\n+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n", "issue": "complete `.zenodo.json` metadata\nOur `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4\r\n\r\nI feel like adding the contributors that are currently not listed to that file in a PR, and making that a \"custom\" (to add new contributors also to the zenodo metadata). I think that's a nice \"reward\" for contributions. Any objections?\n", "before_files": [{"content": "\"\"\"Setup MNE-BIDS.\"\"\"\nimport sys\n\nfrom setuptools import setup\n\n# Give setuptools a hint to complain if it's too old a version\nSETUP_REQUIRES = [\"setuptools >= 46.4.0\"]\n# This enables setuptools to install wheel on-the-fly\nSETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n\nversion = None\nwith open('mne_bids/__init__.py', 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n version = line.split(' = ')[1].split('#')[0].strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\nif __name__ == \"__main__\":\n setup(\n version=version,\n setup_requires=SETUP_REQUIRES,\n )\n", "path": "setup.py"}]}
| 916 | 146 |
gh_patches_debug_60760
|
rasdani/github-patches
|
git_diff
|
coala__coala-bears-310
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GitCommitBear: body_line_length shall be 72 and not 73
The standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.
</issue>
<code>
[start of bears/vcs/git/GitCommitBear.py]
1 import re
2 import shutil
3 import os
4
5 from coalib.bears.GlobalBear import GlobalBear
6 from coalib.misc.ContextManagers import change_directory
7 from coalib.misc.Shell import run_shell_command
8 from coalib.results.Result import Result
9
10
11 class GitCommitBear(GlobalBear):
12
13 @classmethod
14 def check_prerequisites(cls):
15 if shutil.which("git") is None:
16 return "git is not installed."
17 else:
18 return True
19
20 def run(self,
21 shortlog_length: int=50,
22 body_line_length: int=73,
23 force_body: bool=False,
24 allow_empty_commit_message: bool=False,
25 shortlog_regex: str="",
26 shortlog_trailing_period: bool=None):
27 """
28 Checks the current git commit message at HEAD.
29
30 This bear ensures that the shortlog and body do not exceed a given
31 line-length and that a newline lies between them.
32
33 :param shortlog_length: The maximum length of the shortlog.
34 The shortlog is the first line of
35 the commit message. The newline
36 character at end does not count to
37 the length.
38 :param body_line_length: The maximum line-length of the body.
39 The newline character at each line
40 end does not count to the length.
41 :param force_body: Whether a body shall exist or not.
42 :param allow_empty_commit_message: Whether empty commit messages are
43 allowed or not.
44 :param shortlog_regex: A regex to check the shortlog with.
45 A full match of this regex is then
46 required. Passing an empty string
47 disable the regex-check.
48 :param shortlog_trailing_period: Whether a dot shall be enforced at
49 the end of the shortlog line.
50 Providing ``None`` means
51 "doesn't care".
52 """
53 with change_directory(self.get_config_dir() or os.getcwd()):
54 stdout, stderr = run_shell_command("git log -1 --pretty=%B")
55
56 if stderr:
57 self.err("git:", repr(stderr))
58 return
59
60 stdout = stdout.rstrip("\n").splitlines()
61
62 if len(stdout) == 0:
63 if not allow_empty_commit_message:
64 yield Result(self, "HEAD commit has no message.")
65 return
66
67 yield from self.check_shortlog(shortlog_length,
68 shortlog_regex,
69 shortlog_trailing_period,
70 stdout[0])
71 yield from self.check_body(body_line_length, force_body, stdout[1:])
72
73 def check_shortlog(self,
74 shortlog_length,
75 regex,
76 shortlog_trailing_period,
77 shortlog):
78 """
79 Checks the given shortlog.
80
81 :param shortlog_length: The maximum length of the shortlog.
82 The newline character at end does not
83 count to the length.
84 :param regex: A regex to check the shortlog with.
85 :param shortlog_trailing_period: Whether a dot shall be enforced at end
86 end or not (or ``None`` for "don't
87 care").
88 :param shortlog: The shortlog message string.
89 """
90 diff = len(shortlog) - shortlog_length
91 if diff > 0:
92 yield Result(self,
93 "Shortlog of HEAD commit is {} character(s) longer "
94 "than the limit ({} > {}).".format(
95 diff, len(shortlog), shortlog_length))
96
97 if (shortlog[-1] != ".") == shortlog_trailing_period:
98 yield Result(self,
99 "Shortlog of HEAD commit contains no period at end."
100 if shortlog_trailing_period else
101 "Shortlog of HEAD commit contains a period at end.")
102
103 if regex != "":
104 match = re.match(regex, shortlog)
105 # fullmatch() inside re-module exists sadly since 3.4, but we
106 # support 3.3 so we need to check that the regex matched completely
107 # ourselves.
108 if not match or match.end() != len(shortlog):
109 yield Result(
110 self,
111 "Shortlog of HEAD commit does not match given regex.")
112
113 def check_body(self, body_line_length, force_body, body):
114 """
115 Checks the given commit body.
116
117 :param body_line_length: The maximum line-length of the body. The
118 newline character at each line end does not
119 count to the length.
120 :param force_body: Whether a body shall exist or not.
121 :param body: The commit body splitted by lines.
122 """
123 if len(body) == 0:
124 if force_body:
125 yield Result(self, "No commit message body at HEAD.")
126 return
127
128 if body[0] != "":
129 yield Result(self, "No newline between shortlog and body at HEAD.")
130 return
131
132 if any(len(line) > body_line_length for line in body[1:]):
133 yield Result(self, "Body of HEAD commit contains too long lines.")
134
[end of bears/vcs/git/GitCommitBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py
--- a/bears/vcs/git/GitCommitBear.py
+++ b/bears/vcs/git/GitCommitBear.py
@@ -19,7 +19,7 @@
def run(self,
shortlog_length: int=50,
- body_line_length: int=73,
+ body_line_length: int=72,
force_body: bool=False,
allow_empty_commit_message: bool=False,
shortlog_regex: str="",
|
{"golden_diff": "diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py\n--- a/bears/vcs/git/GitCommitBear.py\n+++ b/bears/vcs/git/GitCommitBear.py\n@@ -19,7 +19,7 @@\n \n def run(self,\n shortlog_length: int=50,\n- body_line_length: int=73,\n+ body_line_length: int=72,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n", "issue": "GitCommitBear: body_line_length shall be 72 and not 73\nThe standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.\n\n", "before_files": [{"content": "import re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\n\n\nclass GitCommitBear(GlobalBear):\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n def run(self,\n shortlog_length: int=50,\n body_line_length: int=73,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None):\n \"\"\"\n Checks the current git commit message at HEAD.\n\n This bear ensures that the shortlog and body do not exceed a given\n line-length and that a newline lies between them.\n\n :param shortlog_length: The maximum length of the shortlog.\n The shortlog is the first line of\n the commit message. The newline\n character at end does not count to\n the length.\n :param body_line_length: The maximum line-length of the body.\n The newline character at each line\n end does not count to the length.\n :param force_body: Whether a body shall exist or not.\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n :param shortlog_regex: A regex to check the shortlog with.\n A full match of this regex is then\n required. 
Passing an empty string\n disable the regex-check.\n :param shortlog_trailing_period: Whether a dot shall be enforced at\n the end of the shortlog line.\n Providing ``None`` means\n \"doesn't care\".\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(shortlog_length,\n shortlog_regex,\n shortlog_trailing_period,\n stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n\n def check_shortlog(self,\n shortlog_length,\n regex,\n shortlog_trailing_period,\n shortlog):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog: The shortlog message string.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of HEAD commit is {} character(s) longer \"\n \"than the limit ({} > {}).\".format(\n diff, len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if regex != \"\":\n match = re.match(regex, shortlog)\n # fullmatch() inside re-module exists sadly since 3.4, but we\n # support 3.3 so we need to check that the regex matched completely\n # ourselves.\n if not match or match.end() != len(shortlog):\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex.\")\n\n def check_body(self, body_line_length, force_body, body):\n \"\"\"\n Checks the given commit body.\n\n :param body_line_length: The maximum line-length of the body. The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param body: The commit body splitted by lines.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline between shortlog and body at HEAD.\")\n return\n\n if any(len(line) > body_line_length for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines.\")\n", "path": "bears/vcs/git/GitCommitBear.py"}]}
| 1,942 | 126 |
gh_patches_debug_8972
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-2367
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] pyro.distributions.InverseGamma.sample does not work on GPU
### Issue Description
`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:
```
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
```
I think the problem is in `__init__`:
```python
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
```
The argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.
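A minimal sketch of that suggestion (illustrative only; the released fix may instead create a scalar exponent on the parameters' device):
```python
import torch
from pyro.distributions import Gamma, TransformedDistribution
from pyro.distributions.transforms import PowerTransform

class InverseGammaSketch(TransformedDistribution):
    """Like InverseGamma, but the exponent follows the parameters' device/dtype."""
    def __init__(self, concentration, rate, validate_args=None):
        base_dist = Gamma(concentration, rate)
        exponent = -torch.ones_like(rate)  # lives on the same device as `rate`
        super().__init__(base_dist, PowerTransform(exponent),
                         validate_args=validate_args)

# InverseGammaSketch(concentration, rate).sample() then works with CUDA tensors.
```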
### Environment
Ubuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0
### Code Snippet
```python
>>> import torch
>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution
>>> from pyro.distributions.transforms import PowerTransform
>>>
>>> concentration = torch.tensor(1.0).to("cuda")
>>> rate = torch.tensor(1.0).to("cuda")
>>>
>>> # InverseGamma.sample fails with an error
>>> InverseGamma(concentration, rate).sample()
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
>>>
>>> # The equivalent TransformedDistribution is fine
>>> TransformedDistribution(
... Gamma(concentration, rate),
... PowerTransform(torch.tensor(-1.0).to("cuda")),
... ).sample()
tensor(0.5707, device='cuda:0')
```
</issue>
<code>
[start of pyro/distributions/inverse_gamma.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from torch.distributions import constraints
5 from torch.distributions.transforms import PowerTransform
6 from pyro.distributions.torch import Gamma, TransformedDistribution
7
8
9 class InverseGamma(TransformedDistribution):
10 r"""
11 Creates an inverse-gamma distribution parameterized by
12 `concentration` and `rate`.
13
14 X ~ Gamma(concentration, rate)
15 Y = 1/X ~ InverseGamma(concentration, rate)
16
17 :param torch.Tensor concentration: the concentration parameter (i.e. alpha).
18 :param torch.Tensor rate: the rate parameter (i.e. beta).
19 """
20 arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
21 support = constraints.positive
22 has_rsample = True
23
24 def __init__(self, concentration, rate, validate_args=None):
25 base_dist = Gamma(concentration, rate)
26 super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
27
28 def expand(self, batch_shape, _instance=None):
29 new = self._get_checked_instance(InverseGamma, _instance)
30 return super().expand(batch_shape, _instance=new)
31
32 @property
33 def concentration(self):
34 return self.base_dist.concentration
35
36 @property
37 def rate(self):
38 return self.base_dist.rate
39
[end of pyro/distributions/inverse_gamma.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py
--- a/pyro/distributions/inverse_gamma.py
+++ b/pyro/distributions/inverse_gamma.py
@@ -23,7 +23,8 @@
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),
+ validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(InverseGamma, _instance)
|
{"golden_diff": "diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py\n--- a/pyro/distributions/inverse_gamma.py\n+++ b/pyro/distributions/inverse_gamma.py\n@@ -23,7 +23,8 @@\n \n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),\n+ validate_args=validate_args)\n \n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n", "issue": "[bug] pyro.distributions.InverseGamma.sample does not work on GPU\n### Issue Description\r\n`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:\r\n```\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n```\r\n\r\nI think the problem is in `__init__`:\r\n```python\r\n def __init__(self, concentration, rate, validate_args=None):\r\n base_dist = Gamma(concentration, rate)\r\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\r\n```\r\nThe argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.\r\n\r\n### Environment\r\nUbuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0\r\n\r\n### Code Snippet\r\n```python\r\n>>> import torch\r\n>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution\r\n>>> from pyro.distributions.transforms import PowerTransform\r\n>>>\r\n>>> concentration = torch.tensor(1.0).to(\"cuda\")\r\n>>> rate = torch.tensor(1.0).to(\"cuda\")\r\n>>> \r\n>>> # InverseGamma.sample fails with an error\r\n>>> InverseGamma(concentration, rate).sample()\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n>>> \r\n>>> # The equivalent TransformedDistribution is fine\r\n>>> TransformedDistribution(\r\n... Gamma(concentration, rate),\r\n... PowerTransform(torch.tensor(-1.0).to(\"cuda\")),\r\n... ).sample()\r\ntensor(0.5707, device='cuda:0')\r\n```\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom torch.distributions import constraints\nfrom torch.distributions.transforms import PowerTransform\nfrom pyro.distributions.torch import Gamma, TransformedDistribution\n\n\nclass InverseGamma(TransformedDistribution):\n r\"\"\"\n Creates an inverse-gamma distribution parameterized by\n `concentration` and `rate`.\n\n X ~ Gamma(concentration, rate)\n Y = 1/X ~ InverseGamma(concentration, rate)\n\n :param torch.Tensor concentration: the concentration parameter (i.e. alpha).\n :param torch.Tensor rate: the rate parameter (i.e. 
beta).\n \"\"\"\n arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}\n support = constraints.positive\n has_rsample = True\n\n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def concentration(self):\n return self.base_dist.concentration\n\n @property\n def rate(self):\n return self.base_dist.rate\n", "path": "pyro/distributions/inverse_gamma.py"}]}
| 1,332 | 160 |
gh_patches_debug_20498
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GET on default bucket increases quota usage
While investigating #1225, I discovered that *every* GET request to e.g. `/buckets/default/collections/some-collection` causes quota usage to increase. For example,
```
select * from records where collection_id='quota';
 id              | parent_id                                                                        | collection_id | last_modified              | data
-----------------+----------------------------------------------------------------------------------+---------------+----------------------------+--------------------------------------------------------------------
 bucket_info     | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5                                    | quota         | 2017-05-22 18:07:14.715782 | {"record_count": 0, "storage_size": 1745, "collection_count": 13}
 collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-newest-collection-3 | quota         | 2017-05-22 18:07:14.716923 | {"record_count": 0, "storage_size": 540}
```
```
> http --auth 'user:pass6' 'localhost:8888/v1/buckets/default/collections/a-newest-collection-3'
HTTP/1.1 200 OK
..... more junk .....
```
```
select * from records where collection_id='quota';
 id              | parent_id                                                                        | collection_id | last_modified              | data
-----------------+----------------------------------------------------------------------------------+---------------+----------------------------+--------------------------------------------------------------------
 bucket_info     | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5                                    | quota         | 2017-05-22 18:56:52.448115 | {"record_count": 0, "storage_size": 1880, "collection_count": 14}
 collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-newest-collection-3 | quota         | 2017-05-22 18:56:52.4493 | {"record_count": 0, "storage_size": 600}
```
This appears to be partly because every request that goes to the default bucket "creates" a bucket and collection.
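To illustrate, a hedged sketch of one way to avoid counting these phantom creations (assuming `create_record` raises a `UnicityError` carrying the existing record when the object is already there; this is illustrative only, not necessarily the project's actual fix):
```python
# Hypothetical helper for resource_create_object(); skips CREATE events (and thus
# quota bookkeeping) when the bucket/collection already exists.
from kinto.core.events import ACTIONS
from kinto.core.storage.exceptions import UnicityError

def create_object_once(resource, obj_id):
    try:
        obj = resource.model.create_record({'id': obj_id})
    except UnicityError as exc:
        return exc.record  # already existed: no event fired, no quota change
    resource.postprocess(obj, action=ACTIONS.CREATE)
    return obj
```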
</issue>
<code>
[start of kinto/plugins/default_bucket/__init__.py]
1 import uuid
2
3 from pyramid import httpexceptions
4 from pyramid.settings import asbool
5 from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
6
7 from kinto.core.errors import raise_invalid
8 from kinto.core.events import ACTIONS
9 from kinto.core.utils import (
10 build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)
11
12 from kinto.authorization import RouteFactory
13 from kinto.views.buckets import Bucket
14 from kinto.views.collections import Collection
15
16
17 def create_bucket(request, bucket_id):
18 """Create a bucket if it doesn't exists."""
19 bucket_put = (request.method.lower() == 'put' and
20 request.path.endswith('buckets/default'))
21 # Do nothing if current request will already create the bucket.
22 if bucket_put:
23 return
24
25 # Do not intent to create multiple times per request (e.g. in batch).
26 already_created = request.bound_data.setdefault('buckets', {})
27 if bucket_id in already_created:
28 return
29
30 bucket_uri = instance_uri(request, 'bucket', id=bucket_id)
31 bucket = resource_create_object(request=request,
32 resource_cls=Bucket,
33 uri=bucket_uri)
34 already_created[bucket_id] = bucket
35
36
37 def create_collection(request, bucket_id):
38 # Do nothing if current request does not involve a collection.
39 subpath = request.matchdict.get('subpath')
40 if not (subpath and subpath.rstrip('/').startswith('collections/')):
41 return
42
43 collection_id = subpath.split('/')[1]
44 collection_uri = instance_uri(request, 'collection',
45 bucket_id=bucket_id,
46 id=collection_id)
47
48 # Do not intent to create multiple times per request (e.g. in batch).
49 already_created = request.bound_data.setdefault('collections', {})
50 if collection_uri in already_created:
51 return
52
53 # Do nothing if current request will already create the collection.
54 collection_put = (request.method.lower() == 'put' and
55 request.path.endswith(collection_id))
56 if collection_put:
57 return
58
59 collection = resource_create_object(request=request,
60 resource_cls=Collection,
61 uri=collection_uri)
62 already_created[collection_uri] = collection
63
64
65 def resource_create_object(request, resource_cls, uri):
66 """In the default bucket, the bucket and collection are implicitly
67 created. This helper instantiate the resource and simulate a request
68 with its RootFactory on the instantiated resource.
69 :returns: the created object
70 :rtype: dict
71 """
72 resource_name, matchdict = view_lookup(request, uri)
73
74 # Build a fake request, mainly used to populate the create events that
75 # will be triggered by the resource.
76 fakerequest = build_request(request, {
77 'method': 'PUT',
78 'path': uri,
79 })
80 fakerequest.matchdict = matchdict
81 fakerequest.bound_data = request.bound_data
82 fakerequest.authn_type = request.authn_type
83 fakerequest.selected_userid = request.selected_userid
84 fakerequest.errors = request.errors
85 fakerequest.current_resource_name = resource_name
86
87 obj_id = matchdict['id']
88
89 # Fake context, required to instantiate a resource.
90 context = RouteFactory(fakerequest)
91 context.resource_name = resource_name
92 resource = resource_cls(fakerequest, context)
93
94 # Check that provided id is valid for this resource.
95 if not resource.model.id_generator.match(obj_id):
96 error_details = {
97 'location': 'path',
98 'description': "Invalid {} id".format(resource_name)
99 }
100 raise_invalid(resource.request, **error_details)
101
102 data = {'id': obj_id}
103 obj = resource.model.create_record(data, ignore_conflict=True)
104 # Since the current request is not a resource (but a straight Service),
105 # we simulate a request on a resource.
106 # This will be used in the resource event payload.
107 resource.postprocess(obj, action=ACTIONS.CREATE)
108 return obj
109
110
111 def default_bucket(request):
112 if request.method.lower() == 'options':
113 path = request.path.replace('default', 'unknown')
114 subrequest = build_request(request, {
115 'method': 'OPTIONS',
116 'path': path
117 })
118 return request.invoke_subrequest(subrequest)
119
120 if Authenticated not in request.effective_principals:
121 # Pass through the forbidden_view_config
122 raise httpexceptions.HTTPForbidden()
123
124 settings = request.registry.settings
125
126 if asbool(settings['readonly']):
127 raise httpexceptions.HTTPMethodNotAllowed()
128
129 bucket_id = request.default_bucket_id
130
131 # Implicit object creations.
132 # Make sure bucket exists
133 create_bucket(request, bucket_id)
134 # Make sure the collection exists
135 create_collection(request, bucket_id)
136
137 path = request.path.replace('/buckets/default', '/buckets/{}'.format(bucket_id))
138 querystring = request.url[(request.url.index(request.path) +
139 len(request.path)):]
140 try:
141 # If 'id' is provided as 'default', replace with actual bucket id.
142 body = request.json
143 body['data']['id'] = body['data']['id'].replace('default', bucket_id)
144 except:
145 body = request.body or {"data": {}}
146 subrequest = build_request(request, {
147 'method': request.method,
148 'path': path + querystring,
149 'body': body,
150 })
151 subrequest.bound_data = request.bound_data
152
153 try:
154 response = request.invoke_subrequest(subrequest)
155 except httpexceptions.HTTPException as error:
156 is_redirect = error.status_code < 400
157 if error.content_type == 'application/json' or is_redirect:
158 response = reapply_cors(subrequest, error)
159 else:
160 # Ask the upper level to format the error.
161 raise error
162 return response
163
164
165 def default_bucket_id(request):
166 settings = request.registry.settings
167 secret = settings['userid_hmac_secret']
168 # Build the user unguessable bucket_id UUID from its user_id
169 digest = hmac_digest(secret, request.prefixed_userid)
170 return str(uuid.UUID(digest[:32]))
171
172
173 def get_user_info(request):
174 user_info = {
175 'id': request.prefixed_userid,
176 'bucket': request.default_bucket_id
177 }
178 return user_info
179
180
181 def includeme(config):
182 # Redirect default to the right endpoint
183 config.add_view(default_bucket,
184 route_name='default_bucket',
185 permission=NO_PERMISSION_REQUIRED)
186 config.add_view(default_bucket,
187 route_name='default_bucket_collection',
188 permission=NO_PERMISSION_REQUIRED)
189
190 config.add_route('default_bucket_collection',
191 '/buckets/default/{subpath:.*}')
192 config.add_route('default_bucket', '/buckets/default')
193
194 # Provide helpers
195 config.add_request_method(default_bucket_id, reify=True)
196 # Override kinto.core default user info
197 config.add_request_method(get_user_info)
198
199 config.add_api_capability(
200 "default_bucket",
201 description="The default bucket is an alias for a personal"
202 " bucket where collections are created implicitly.",
203 url="https://kinto.readthedocs.io/en/latest/api/1.x/"
204 "buckets.html#personal-bucket-default")
205
[end of kinto/plugins/default_bucket/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py
--- a/kinto/plugins/default_bucket/__init__.py
+++ b/kinto/plugins/default_bucket/__init__.py
@@ -6,6 +6,7 @@
from kinto.core.errors import raise_invalid
from kinto.core.events import ACTIONS
+from kinto.core.storage.exceptions import UnicityError
from kinto.core.utils import (
build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)
@@ -100,7 +101,12 @@
raise_invalid(resource.request, **error_details)
data = {'id': obj_id}
- obj = resource.model.create_record(data, ignore_conflict=True)
+ try:
+ obj = resource.model.create_record(data)
+ except UnicityError as e:
+ # The record already exists; skip running events
+ return e.record
+
# Since the current request is not a resource (but a straight Service),
# we simulate a request on a resource.
# This will be used in the resource event payload.
|
{"golden_diff": "diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py\n--- a/kinto/plugins/default_bucket/__init__.py\n+++ b/kinto/plugins/default_bucket/__init__.py\n@@ -6,6 +6,7 @@\n \n from kinto.core.errors import raise_invalid\n from kinto.core.events import ACTIONS\n+from kinto.core.storage.exceptions import UnicityError\n from kinto.core.utils import (\n build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)\n \n@@ -100,7 +101,12 @@\n raise_invalid(resource.request, **error_details)\n \n data = {'id': obj_id}\n- obj = resource.model.create_record(data, ignore_conflict=True)\n+ try:\n+ obj = resource.model.create_record(data)\n+ except UnicityError as e:\n+ # The record already exists; skip running events\n+ return e.record\n+\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n", "issue": "GET on default bucket increases quota usage\nWhile investigating #1225, I discovered that *every* GET request to e.g. `/buckets/default/collections/some-collection` causes quota usage to increase. For example,\r\n\r\n```\r\nselect * from records where collection_id='quota';\r\n id | parent_id \r\n | collection_id | last_modified | \r\n data \r\n-----------------+--------------------------------------------------------------\r\n-------------------+---------------+----------------------------+---------------\r\n----------------------------------------------------\r\n bucket_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5 \r\n | quota | 2017-05-22 18:07:14.715782 | {\"record_count\r\n\": 0, \"storage_size\": 1745, \"collection_count\": 13}\r\n collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-n\r\newest-collection-3 | quota | 2017-05-22 18:07:14.716923 | {\"record_count\r\n\": 0, \"storage_size\": 540}\r\n```\r\n\r\n```\r\n> http --auth 'user:pass6' 'localhost:8888/v1/buckets/default/collections/a-newest-collection-3'\r\nHTTP/1.1 200 OK\r\n..... 
more junk .....\r\n```\r\n\r\n```\r\nselect * from records where collection_id='quota';\r\n id | parent_id \r\n | collection_id | last_modified | \r\n data \r\n-----------------+--------------------------------------------------------------\r\n-------------------+---------------+----------------------------+---------------\r\n----------------------------------------------------\r\n bucket_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5 \r\n | quota | 2017-05-22 18:56:52.448115 | {\"record_count\r\n\": 0, \"storage_size\": 1880, \"collection_count\": 14}\r\n collection_info | /buckets/74f86a68-a03c-a5f5-fb29-e421b362f5b5/collections/a-n\r\newest-collection-3 | quota | 2017-05-22 18:56:52.4493 | {\"record_count\r\n\": 0, \"storage_size\": 600}\r\n```\r\n\r\nThis appears to be partly because every request that goes to the default bucket \"creates\" a bucket and collection.\n", "before_files": [{"content": "import uuid\n\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom kinto.core.errors import raise_invalid\nfrom kinto.core.events import ACTIONS\nfrom kinto.core.utils import (\n build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n bucket_uri = instance_uri(request, 'bucket', id=bucket_id)\n bucket = resource_create_object(request=request,\n resource_cls=Bucket,\n uri=bucket_uri)\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.rstrip('/').startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = instance_uri(request, 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n collection = resource_create_object(request=request,\n resource_cls=Collection,\n uri=collection_uri)\n already_created[collection_uri] = collection\n\n\ndef resource_create_object(request, resource_cls, uri):\n \"\"\"In the default bucket, the bucket and collection are implicitly\n created. 
This helper instantiate the resource and simulate a request\n with its RootFactory on the instantiated resource.\n :returns: the created object\n :rtype: dict\n \"\"\"\n resource_name, matchdict = view_lookup(request, uri)\n\n # Build a fake request, mainly used to populate the create events that\n # will be triggered by the resource.\n fakerequest = build_request(request, {\n 'method': 'PUT',\n 'path': uri,\n })\n fakerequest.matchdict = matchdict\n fakerequest.bound_data = request.bound_data\n fakerequest.authn_type = request.authn_type\n fakerequest.selected_userid = request.selected_userid\n fakerequest.errors = request.errors\n fakerequest.current_resource_name = resource_name\n\n obj_id = matchdict['id']\n\n # Fake context, required to instantiate a resource.\n context = RouteFactory(fakerequest)\n context.resource_name = resource_name\n resource = resource_cls(fakerequest, context)\n\n # Check that provided id is valid for this resource.\n if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid {} id\".format(resource_name)\n }\n raise_invalid(resource.request, **error_details)\n\n data = {'id': obj_id}\n obj = resource.model.create_record(data, ignore_conflict=True)\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n resource.postprocess(obj, action=ACTIONS.CREATE)\n return obj\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n\n # Implicit object creations.\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n path = request.path.replace('/buckets/default', '/buckets/{}'.format(bucket_id))\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n try:\n # If 'id' is provided as 'default', replace with actual bucket id.\n body = request.json\n body['data']['id'] = body['data']['id'].replace('default', bucket_id)\n except:\n body = request.body or {\"data\": {}}\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': body,\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return str(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef 
includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override kinto.core default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is an alias for a personal\"\n \" bucket where collections are created implicitly.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}]}
| 3,216 | 244 |
gh_patches_debug_14439
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-715
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fitting with log_gpu_memory=True fails in python3.6.
## Bug
Fitting with `log_gpu_memory=True` in the Trainer fails on Python 3.6.
### To Reproduce
1. Use a Python 3.6 environment.
2. Create any trainer with the `log_gpu_memory=True` option.
3. Then fit it.
4. See the error:
```
/a/pytorch-lightning/pytorch_lightning/core/memory.py in get_gpu_memory_map()
237 encoding='utf-8',
238 capture_output=True,
--> 239 check=True)
240 # Convert lines into a dictionary
241 gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]
/usr/lib/python3.6/subprocess.py in run(input, timeout, check, *popenargs, **kwargs)
421 kwargs['stdin'] = PIPE
422
--> 423 with Popen(*popenargs, **kwargs) as process:
424 try:
425 stdout, stderr = process.communicate(input, timeout=timeout)
TypeError: __init__() got an unexpected keyword argument 'capture_output'
```
#### Code sample
```
trainer = Trainer(
log_gpu_memory=True,
# ....
)
trainer.fit()
```
### Expected behavior
For the same code there are no errors on Python 3.7.
### Environment
pytorch: 1.2.0
Ubuntu 18.04
pytorch-lightning:
- installed to pip environment
- commit 7a1df80f4e98fca
- python setup.py develop
- version 0.6.0
python: 3.6.8
cuda: 10.0, V10.0.130
cudnn: 7.6.2
GPU: RTX 2080 TI
### Additional context
In the `setup.py`
python_requires='>=3.6',
But `capture_output` is used in the `subprocess.run` call, and that keyword is only valid on Python 3.7+.
See also this workaround for maintaining Python 3.6 compatibility:
https://stackoverflow.com/questions/53209127/
</issue>
<code>
[start of pytorch_lightning/core/memory.py]
1 '''
2 Generates a summary of a model's layers and dimensionality
3 '''
4
5 import gc
6 import os
7 import subprocess
8
9 import numpy as np
10 import pandas as pd
11 import torch
12 import logging
13
14
15 class ModelSummary(object):
16
17 def __init__(self, model, mode='full'):
18 '''
19 Generates summaries of model layers and dimensions.
20 '''
21 self.model = model
22 self.mode = mode
23 self.in_sizes = []
24 self.out_sizes = []
25
26 self.summarize()
27
28 def __str__(self):
29 return self.summary.__str__()
30
31 def __repr__(self):
32 return self.summary.__str__()
33
34 def named_modules(self):
35 if self.mode == 'full':
36 mods = self.model.named_modules()
37 mods = list(mods)[1:] # do not include root module (LightningModule)
38 elif self.mode == 'top':
39 # the children are the top-level modules
40 mods = self.model.named_children()
41 else:
42 mods = []
43 return list(mods)
44
45 def get_variable_sizes(self):
46 '''Run sample input through each layer to get output sizes'''
47 mods = self.named_modules()
48 in_sizes = []
49 out_sizes = []
50 input_ = self.model.example_input_array
51
52 if self.model.on_gpu:
53 device = next(self.model.parameters()).get_device()
54 # test if input is a list or a tuple
55 if isinstance(input_, (list, tuple)):
56 input_ = [input_i.cuda(device) if torch.is_tensor(input_i) else input_i
57 for input_i in input_]
58 else:
59 input_ = input_.cuda(device)
60
61 if self.model.trainer.use_amp:
62 # test if it is not a list or a tuple
63 if isinstance(input_, (list, tuple)):
64 input_ = [input_i.half() if torch.is_tensor(input_i) else input_i
65 for input_i in input_]
66 else:
67 input_ = input_.half()
68
69 with torch.no_grad():
70
71 for _, m in mods:
72 if isinstance(input_, (list, tuple)): # pragma: no cover
73 out = m(*input_)
74 else:
75 out = m(input_)
76
77 if isinstance(input_, (list, tuple)): # pragma: no cover
78 in_size = []
79 for x in input_:
80 if type(x) is list:
81 in_size.append(len(x))
82 else:
83 in_size.append(x.size())
84 else:
85 in_size = np.array(input_.size())
86
87 in_sizes.append(in_size)
88
89 if isinstance(out, (list, tuple)): # pragma: no cover
90 out_size = np.asarray([x.size() for x in out])
91 else:
92 out_size = np.array(out.size())
93
94 out_sizes.append(out_size)
95 input_ = out
96
97 self.in_sizes = in_sizes
98 self.out_sizes = out_sizes
99 assert len(in_sizes) == len(out_sizes)
100 return
101
102 def get_layer_names(self):
103 '''Collect Layer Names'''
104 mods = self.named_modules()
105 names = []
106 layers = []
107 for name, m in mods:
108 names += [name]
109 layers += [str(m.__class__)]
110
111 layer_types = [x.split('.')[-1][:-2] for x in layers]
112
113 self.layer_names = names
114 self.layer_types = layer_types
115 return
116
117 def get_parameter_sizes(self):
118 '''Get sizes of all parameters in `model`'''
119 mods = self.named_modules()
120 sizes = []
121 for _, m in mods:
122 p = list(m.parameters())
123 modsz = []
124 for j in range(len(p)):
125 modsz.append(np.array(p[j].size()))
126 sizes.append(modsz)
127
128 self.param_sizes = sizes
129 return
130
131 def get_parameter_nums(self):
132 '''Get number of parameters in each layer'''
133 param_nums = []
134 for mod in self.param_sizes:
135 all_params = 0
136 for p in mod:
137 all_params += np.prod(p)
138 param_nums.append(all_params)
139 self.param_nums = param_nums
140 return
141
142 def make_summary(self):
143 '''
144 Makes a summary listing with:
145
146 Layer Name, Layer Type, Input Size, Output Size, Number of Parameters
147 '''
148
149 cols = ['Name', 'Type', 'Params']
150 if self.model.example_input_array is not None:
151 cols.extend(['In_sizes', 'Out_sizes'])
152
153 df = pd.DataFrame(np.zeros((len(self.layer_names), len(cols))))
154 df.columns = cols
155
156 df['Name'] = self.layer_names
157 df['Type'] = self.layer_types
158 df['Params'] = self.param_nums
159 df['Params'] = df['Params'].map(get_human_readable_count)
160
161 if self.model.example_input_array is not None:
162 df['In_sizes'] = self.in_sizes
163 df['Out_sizes'] = self.out_sizes
164
165 self.summary = df
166 return
167
168 def summarize(self):
169 self.get_layer_names()
170 self.get_parameter_sizes()
171 self.get_parameter_nums()
172
173 if self.model.example_input_array is not None:
174 self.get_variable_sizes()
175 self.make_summary()
176
177
178 def print_mem_stack(): # pragma: no cover
179 for obj in gc.get_objects():
180 try:
181 if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
182 logging.info(type(obj), obj.size())
183 except Exception:
184 pass
185
186
187 def count_mem_items(): # pragma: no cover
188 num_params = 0
189 num_tensors = 0
190 for obj in gc.get_objects():
191 try:
192 if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
193 obj_type = str(type(obj))
194 if 'parameter' in obj_type:
195 num_params += 1
196 else:
197 num_tensors += 1
198 except Exception:
199 pass
200
201 return num_params, num_tensors
202
203
204 def get_memory_profile(mode):
205 """
206 'all' means return memory for all gpus
207 'min_max' means return memory for max and min
208 :param mode:
209 :return:
210 """
211 memory_map = get_gpu_memory_map()
212
213 if mode == 'min_max':
214 min_index, min_memory = min(memory_map.items(), key=lambda item: item[1])
215 max_index, max_memory = max(memory_map.items(), key=lambda item: item[1])
216
217 memory_map = {min_index: min_memory, max_index: max_memory}
218
219 return memory_map
220
221
222 def get_gpu_memory_map():
223 """Get the current gpu usage.
224
225 Returns
226 -------
227 usage: dict
228 Keys are device ids as integers.
229 Values are memory usage as integers in MB.
230 """
231 result = subprocess.run(
232 [
233 'nvidia-smi',
234 '--query-gpu=memory.used',
235 '--format=csv,nounits,noheader',
236 ],
237 encoding='utf-8',
238 capture_output=True,
239 check=True)
240 # Convert lines into a dictionary
241 gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]
242 gpu_memory_map = {f'gpu_{index}': memory for index, memory in enumerate(gpu_memory)}
243 return gpu_memory_map
244
245
246 def get_human_readable_count(number):
247 """
248 Abbreviates an integer number with K, M, B, T for thousands, millions,
249 billions and trillions, respectively.
250 Examples:
251 123 -> 123
252 1234 -> 1 K (one thousand)
253 2e6 -> 2 M (two million)
254 3e9 -> 3 B (three billion)
255 4e12 -> 4 T (four trillion)
256 5e15 -> 5,000 T
257 :param number: a positive integer number
258 :returns a string formatted according to the pattern described above.
259 """
260 assert number >= 0
261 labels = [' ', 'K', 'M', 'B', 'T']
262 num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)
263 num_groups = int(np.ceil(num_digits / 3))
264 num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions
265 shift = -3 * (num_groups - 1)
266 number = number * (10 ** shift)
267 index = num_groups - 1
268 return f'{int(number):,d} {labels[index]}'
269
[end of pytorch_lightning/core/memory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py
--- a/pytorch_lightning/core/memory.py
+++ b/pytorch_lightning/core/memory.py
@@ -5,6 +5,7 @@
import gc
import os
import subprocess
+from subprocess import PIPE
import numpy as np
import pandas as pd
@@ -235,7 +236,8 @@
'--format=csv,nounits,noheader',
],
encoding='utf-8',
- capture_output=True,
+ # capture_output=True, # valid for python version >=3.7
+ stdout=PIPE, stderr=PIPE, # for backward compatibility with python version 3.6
check=True)
# Convert lines into a dictionary
gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]
|
{"golden_diff": "diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py\n--- a/pytorch_lightning/core/memory.py\n+++ b/pytorch_lightning/core/memory.py\n@@ -5,6 +5,7 @@\n import gc\n import os\n import subprocess\n+from subprocess import PIPE\n \n import numpy as np\n import pandas as pd\n@@ -235,7 +236,8 @@\n '--format=csv,nounits,noheader',\n ],\n encoding='utf-8',\n- capture_output=True,\n+ # capture_output=True, # valid for python version >=3.7\n+ stdout=PIPE, stderr=PIPE, # for backward compatibility with python version 3.6\n check=True)\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]\n", "issue": "Fitting with log_gpu_memory=True fails in python3.6.\n## Bug\r\n\r\nFitting with `log_gpu_memory=True` in the Trainer fails in python3.6 version.\r\n\r\n### To Reproduce\r\n\r\n1. Use python3.6 version\r\n2. Create any trainer with `log_gpu_memory=True` option.\r\n3. Then fit it.\r\n3. See error:\r\n\r\n```\r\n/a/pytorch-lightning/pytorch_lightning/core/memory.py in get_gpu_memory_map()\r\n 237 encoding='utf-8',\r\n 238 capture_output=True,\r\n--> 239 check=True)\r\n 240 # Convert lines into a dictionary\r\n 241 gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]\r\n\r\n/usr/lib/python3.6/subprocess.py in run(input, timeout, check, *popenargs, **kwargs)\r\n 421 kwargs['stdin'] = PIPE\r\n 422 \r\n--> 423 with Popen(*popenargs, **kwargs) as process:\r\n 424 try:\r\n 425 stdout, stderr = process.communicate(input, timeout=timeout)\r\n\r\nTypeError: __init__() got an unexpected keyword argument 'capture_output'\r\n\r\n```\r\n\r\n\r\n#### Code sample\r\n\r\n```\r\ntrainer = Trainer(\r\n log_gpu_memory=True,\r\n # ....\r\n)\r\ntrainer.fit()\r\n```\r\n\r\n### Expected behavior\r\n\r\nFor the same code there is no errors for python3.7\r\n\r\n### Environment\r\n\r\npytorch: 1.2.0\r\nUbuntu 18.04\r\npytorch-lightning: \r\n - installed to pip environment\r\n - commit 7a1df80f4e98fca\r\n - python setup.py develop\r\n - version 0.6.0 \r\npython: 3.6.8\r\ncuda: 10.0, V10.0.130 \r\ncudnn: 7.6.2\r\nGPU: RTX 2080 TI\r\n\r\n### Additional context\r\n\r\nIn the `setup.py`\r\n python_requires='>=3.6',\r\n\r\nBut `capture_output` is used in `subprocess.run` calling, which is valid only for python3.7\r\nSee also workaround to maintain python3.6:\r\nhttps://stackoverflow.com/questions/53209127/\r\n\r\n\n", "before_files": [{"content": "'''\nGenerates a summary of a model's layers and dimensionality\n'''\n\nimport gc\nimport os\nimport subprocess\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport logging\n\n\nclass ModelSummary(object):\n\n def __init__(self, model, mode='full'):\n '''\n Generates summaries of model layers and dimensions.\n '''\n self.model = model\n self.mode = mode\n self.in_sizes = []\n self.out_sizes = []\n\n self.summarize()\n\n def __str__(self):\n return self.summary.__str__()\n\n def __repr__(self):\n return self.summary.__str__()\n\n def named_modules(self):\n if self.mode == 'full':\n mods = self.model.named_modules()\n mods = list(mods)[1:] # do not include root module (LightningModule)\n elif self.mode == 'top':\n # the children are the top-level modules\n mods = self.model.named_children()\n else:\n mods = []\n return list(mods)\n\n def get_variable_sizes(self):\n '''Run sample input through each layer to get output sizes'''\n mods = self.named_modules()\n in_sizes = []\n out_sizes = []\n input_ = self.model.example_input_array\n\n if self.model.on_gpu:\n device = 
next(self.model.parameters()).get_device()\n # test if input is a list or a tuple\n if isinstance(input_, (list, tuple)):\n input_ = [input_i.cuda(device) if torch.is_tensor(input_i) else input_i\n for input_i in input_]\n else:\n input_ = input_.cuda(device)\n\n if self.model.trainer.use_amp:\n # test if it is not a list or a tuple\n if isinstance(input_, (list, tuple)):\n input_ = [input_i.half() if torch.is_tensor(input_i) else input_i\n for input_i in input_]\n else:\n input_ = input_.half()\n\n with torch.no_grad():\n\n for _, m in mods:\n if isinstance(input_, (list, tuple)): # pragma: no cover\n out = m(*input_)\n else:\n out = m(input_)\n\n if isinstance(input_, (list, tuple)): # pragma: no cover\n in_size = []\n for x in input_:\n if type(x) is list:\n in_size.append(len(x))\n else:\n in_size.append(x.size())\n else:\n in_size = np.array(input_.size())\n\n in_sizes.append(in_size)\n\n if isinstance(out, (list, tuple)): # pragma: no cover\n out_size = np.asarray([x.size() for x in out])\n else:\n out_size = np.array(out.size())\n\n out_sizes.append(out_size)\n input_ = out\n\n self.in_sizes = in_sizes\n self.out_sizes = out_sizes\n assert len(in_sizes) == len(out_sizes)\n return\n\n def get_layer_names(self):\n '''Collect Layer Names'''\n mods = self.named_modules()\n names = []\n layers = []\n for name, m in mods:\n names += [name]\n layers += [str(m.__class__)]\n\n layer_types = [x.split('.')[-1][:-2] for x in layers]\n\n self.layer_names = names\n self.layer_types = layer_types\n return\n\n def get_parameter_sizes(self):\n '''Get sizes of all parameters in `model`'''\n mods = self.named_modules()\n sizes = []\n for _, m in mods:\n p = list(m.parameters())\n modsz = []\n for j in range(len(p)):\n modsz.append(np.array(p[j].size()))\n sizes.append(modsz)\n\n self.param_sizes = sizes\n return\n\n def get_parameter_nums(self):\n '''Get number of parameters in each layer'''\n param_nums = []\n for mod in self.param_sizes:\n all_params = 0\n for p in mod:\n all_params += np.prod(p)\n param_nums.append(all_params)\n self.param_nums = param_nums\n return\n\n def make_summary(self):\n '''\n Makes a summary listing with:\n\n Layer Name, Layer Type, Input Size, Output Size, Number of Parameters\n '''\n\n cols = ['Name', 'Type', 'Params']\n if self.model.example_input_array is not None:\n cols.extend(['In_sizes', 'Out_sizes'])\n\n df = pd.DataFrame(np.zeros((len(self.layer_names), len(cols))))\n df.columns = cols\n\n df['Name'] = self.layer_names\n df['Type'] = self.layer_types\n df['Params'] = self.param_nums\n df['Params'] = df['Params'].map(get_human_readable_count)\n\n if self.model.example_input_array is not None:\n df['In_sizes'] = self.in_sizes\n df['Out_sizes'] = self.out_sizes\n\n self.summary = df\n return\n\n def summarize(self):\n self.get_layer_names()\n self.get_parameter_sizes()\n self.get_parameter_nums()\n\n if self.model.example_input_array is not None:\n self.get_variable_sizes()\n self.make_summary()\n\n\ndef print_mem_stack(): # pragma: no cover\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n logging.info(type(obj), obj.size())\n except Exception:\n pass\n\n\ndef count_mem_items(): # pragma: no cover\n num_params = 0\n num_tensors = 0\n for obj in gc.get_objects():\n try:\n if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):\n obj_type = str(type(obj))\n if 'parameter' in obj_type:\n num_params += 1\n else:\n num_tensors += 1\n except Exception:\n pass\n\n return 
num_params, num_tensors\n\n\ndef get_memory_profile(mode):\n \"\"\"\n 'all' means return memory for all gpus\n 'min_max' means return memory for max and min\n :param mode:\n :return:\n \"\"\"\n memory_map = get_gpu_memory_map()\n\n if mode == 'min_max':\n min_index, min_memory = min(memory_map.items(), key=lambda item: item[1])\n max_index, max_memory = max(memory_map.items(), key=lambda item: item[1])\n\n memory_map = {min_index: min_memory, max_index: max_memory}\n\n return memory_map\n\n\ndef get_gpu_memory_map():\n \"\"\"Get the current gpu usage.\n\n Returns\n -------\n usage: dict\n Keys are device ids as integers.\n Values are memory usage as integers in MB.\n \"\"\"\n result = subprocess.run(\n [\n 'nvidia-smi',\n '--query-gpu=memory.used',\n '--format=csv,nounits,noheader',\n ],\n encoding='utf-8',\n capture_output=True,\n check=True)\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.stdout.strip().split(os.linesep)]\n gpu_memory_map = {f'gpu_{index}': memory for index, memory in enumerate(gpu_memory)}\n return gpu_memory_map\n\n\ndef get_human_readable_count(number):\n \"\"\"\n Abbreviates an integer number with K, M, B, T for thousands, millions,\n billions and trillions, respectively.\n Examples:\n 123 -> 123\n 1234 -> 1 K (one thousand)\n 2e6 -> 2 M (two million)\n 3e9 -> 3 B (three billion)\n 4e12 -> 4 T (four trillion)\n 5e15 -> 5,000 T\n :param number: a positive integer number\n :returns a string formatted according to the pattern described above.\n \"\"\"\n assert number >= 0\n labels = [' ', 'K', 'M', 'B', 'T']\n num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)\n num_groups = int(np.ceil(num_digits / 3))\n num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions\n shift = -3 * (num_groups - 1)\n number = number * (10 ** shift)\n index = num_groups - 1\n return f'{int(number):,d} {labels[index]}'\n", "path": "pytorch_lightning/core/memory.py"}]}
| 3,615 | 188 |
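A side note on the entry above: the whole fix is to replace `capture_output=True` (Python 3.7+) with explicit pipes so that `subprocess.run` also works on Python 3.6. Below is a minimal standalone sketch of that compatibility pattern; the function name is illustrative, only `nvidia-smi` on the PATH is assumed, and it mirrors the golden diff rather than any other pytorch-lightning internals.

```python
import os
import subprocess
from subprocess import PIPE


def get_gpu_memory_map_py36():
    """Query nvidia-smi for per-GPU memory usage in a Python 3.6-compatible way.

    capture_output=True only exists from Python 3.7 on, so stdout/stderr are
    captured with explicit PIPEs instead, which behaves the same on 3.7+.
    """
    result = subprocess.run(
        ["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"],
        encoding="utf-8",
        stdout=PIPE,
        stderr=PIPE,
        check=True,
    )
    used = [int(x) for x in result.stdout.strip().split(os.linesep)]
    return {f"gpu_{index}": memory for index, memory in enumerate(used)}
```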
gh_patches_debug_7918
|
rasdani/github-patches
|
git_diff
|
NVIDIA__NVFlare-75
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CIFAR10 run_fl.py is missing the license header
https://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1
</issue>
<code>
[start of examples/cifar10/run_fl.py]
1 import argparse
2 import os
3 import time
4
5 from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner
6
7
8 def main():
9 parser = argparse.ArgumentParser()
10 parser.add_argument("--run_number", type=int, default=100, help="FL run number to start at.")
11 parser.add_argument("--admin_dir", type=str, default="./admin/", help="Path to admin directory.")
12 parser.add_argument("--username", type=str, default="[email protected]", help="Admin username")
13 parser.add_argument("--app", type=str, default="cifar10_fedavg", help="App to be deployed")
14 parser.add_argument("--port", type=int, default=8003, help="The admin server port")
15 parser.add_argument("--poc", action='store_true', help="Whether admin uses POC mode.")
16 parser.add_argument("--min_clients", type=int, default=8, help="Minimum number of clients.")
17 args = parser.parse_args()
18
19 host = ""
20 port = args.port
21
22 assert os.path.isdir(args.admin_dir), f"admin directory does not exist at {args.admin_dir}"
23
24 # Set up certificate names and admin folders
25 upload_dir = os.path.join(args.admin_dir, "transfer")
26 if not os.path.isdir(upload_dir):
27 os.makedirs(upload_dir)
28 download_dir = os.path.join(args.admin_dir, "download")
29 if not os.path.isdir(download_dir):
30 os.makedirs(download_dir)
31
32 run_number = args.run_number
33
34 # Initialize the runner
35 runner = FLAdminAPIRunner(
36 host=host,
37 port=port,
38 username=args.username,
39 admin_dir=args.admin_dir,
40 poc=args.poc,
41 debug=False,
42 )
43
44 # Run
45 start = time.time()
46 runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,
47 timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours
48 print("Total training time", time.time() - start)
49
50
51 if __name__ == "__main__":
52 main()
53
[end of examples/cifar10/run_fl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py
--- a/examples/cifar10/run_fl.py
+++ b/examples/cifar10/run_fl.py
@@ -1,3 +1,17 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import argparse
import os
import time
|
{"golden_diff": "diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py\n--- a/examples/cifar10/run_fl.py\n+++ b/examples/cifar10/run_fl.py\n@@ -1,3 +1,17 @@\n+# Copyright (c) 2021, NVIDIA CORPORATION.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n import argparse\n import os\n import time\n", "issue": "CIFAR10 run_fl.py misses license header\nhttps://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1\n", "before_files": [{"content": "import argparse\nimport os\nimport time\n\nfrom nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run_number\", type=int, default=100, help=\"FL run number to start at.\")\n parser.add_argument(\"--admin_dir\", type=str, default=\"./admin/\", help=\"Path to admin directory.\")\n parser.add_argument(\"--username\", type=str, default=\"[email protected]\", help=\"Admin username\")\n parser.add_argument(\"--app\", type=str, default=\"cifar10_fedavg\", help=\"App to be deployed\")\n parser.add_argument(\"--port\", type=int, default=8003, help=\"The admin server port\")\n parser.add_argument(\"--poc\", action='store_true', help=\"Whether admin uses POC mode.\")\n parser.add_argument(\"--min_clients\", type=int, default=8, help=\"Minimum number of clients.\")\n args = parser.parse_args()\n\n host = \"\"\n port = args.port\n\n assert os.path.isdir(args.admin_dir), f\"admin directory does not exist at {args.admin_dir}\"\n\n # Set up certificate names and admin folders\n upload_dir = os.path.join(args.admin_dir, \"transfer\")\n if not os.path.isdir(upload_dir):\n os.makedirs(upload_dir)\n download_dir = os.path.join(args.admin_dir, \"download\")\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n\n run_number = args.run_number\n\n # Initialize the runner\n runner = FLAdminAPIRunner(\n host=host,\n port=port,\n username=args.username,\n admin_dir=args.admin_dir,\n poc=args.poc,\n debug=False,\n )\n\n # Run\n start = time.time()\n runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,\n timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours\n print(\"Total training time\", time.time() - start)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/cifar10/run_fl.py"}]}
| 1,167 | 202 |
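The fix above is mechanical: prepend the standard Apache 2.0 header to the one file that lacks it. For repositories with many affected files the same header can be applied in bulk; the helper below is a generic sketch (not part of NVFlare), reusing the header text from the diff above.

```python
from pathlib import Path

APACHE_HEADER = """\
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""


def add_missing_headers(root: str) -> None:
    """Prepend the license header to any .py file that does not already carry it."""
    for path in Path(root).rglob("*.py"):
        text = path.read_text(encoding="utf-8")
        if "Licensed under the Apache License" not in text:
            path.write_text(APACHE_HEADER + text, encoding="utf-8")
```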
gh_patches_debug_29982
|
rasdani/github-patches
|
git_diff
|
astronomer__astro-sdk-453
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change `export_file` to return `File` object
**Context**
In order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.
* Astro-SDK version: 0.9..1
* Request by: @jlaneve
* Analysed by @dimberman
**Problem**
At the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:
```
output_file = File(path="/tmp/saved_df.csv")
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
export = aql.export_file(
input_data=table,
output_file=output_file,
if_exists="replace",
)
res_df = aql.load_file(input_file=output_file)
export >> res_df
```
**Desired behaviour**
```
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
exported_file = aql.export_file(
input_data=table,
output_file=File(path="/tmp/saved_df.csv"),
if_exists="replace",
)
res_df = aql.load_file(input_file=exported_file)
```
**Acceptance criteria**
* Change `export_file` so it returns the `File` instance, as opposed to `None`
Since there is no documentation about this task, we don't need to update the documentation for it. Creating documentation for this feature should be part of another issue.
</issue>
<code>
[start of src/astro/sql/operators/export_file.py]
1 from typing import Optional, Union
2
3 import pandas as pd
4 from airflow.models import BaseOperator
5 from airflow.models.xcom_arg import XComArg
6
7 from astro.constants import ExportExistsStrategy
8 from astro.databases import create_database
9 from astro.files import File
10 from astro.sql.table import Table
11 from astro.utils.task_id_helper import get_task_id
12
13
14 class ExportFile(BaseOperator):
15 """Write SQL table to csv/parquet on local/S3/GCS.
16
17 :param input_data: Table to convert to file
18 :param output_file: File object containing the path to the file and connection id.
19 :param if_exists: Overwrite file if exists. Default False.
20 """
21
22 template_fields = ("input_data", "output_file")
23
24 def __init__(
25 self,
26 input_data: Union[Table, pd.DataFrame],
27 output_file: File,
28 if_exists: ExportExistsStrategy = "exception",
29 **kwargs,
30 ) -> None:
31 super().__init__(**kwargs)
32 self.output_file = output_file
33 self.input_data = input_data
34 self.if_exists = if_exists
35 self.kwargs = kwargs
36
37 def execute(self, context: dict) -> None:
38 """Write SQL table to csv/parquet on local/S3/GCS.
39
40 Infers SQL database type based on connection.
41 """
42 # Infer db type from `input_conn_id`.
43 if isinstance(self.input_data, Table):
44 database = create_database(self.input_data.conn_id)
45 self.input_data = database.populate_table_metadata(self.input_data)
46 df = database.export_table_to_pandas_dataframe(self.input_data)
47 elif isinstance(self.input_data, pd.DataFrame):
48 df = self.input_data
49 else:
50 raise ValueError(
51 f"Expected input_table to be Table or dataframe. Got {type(self.input_data)}"
52 )
53 # Write file if overwrite == True or if file doesn't exist.
54 if self.if_exists == "replace" or not self.output_file.exists():
55 self.output_file.create_from_dataframe(df)
56 else:
57 raise FileExistsError(f"{self.output_file.path} file already exists.")
58
59
60 def export_file(
61 input_data: Union[Table, pd.DataFrame],
62 output_file: File,
63 if_exists: ExportExistsStrategy = "exception",
64 task_id: Optional[str] = None,
65 **kwargs,
66 ) -> XComArg:
67 """Convert SaveFile into a function. Returns XComArg.
68
69 Returns an XComArg object.
70
71 :param output_file: Path and conn_id
72 :param input_data: Input table / dataframe
73 :param if_exists: Overwrite file if exists. Default "exception"
74 :param task_id: task id, optional
75 """
76
77 task_id = (
78 task_id if task_id is not None else get_task_id("export_file", output_file.path)
79 )
80
81 return ExportFile(
82 task_id=task_id,
83 output_file=output_file,
84 input_data=input_data,
85 if_exists=if_exists,
86 ).output
87
[end of src/astro/sql/operators/export_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py
--- a/src/astro/sql/operators/export_file.py
+++ b/src/astro/sql/operators/export_file.py
@@ -34,7 +34,7 @@
self.if_exists = if_exists
self.kwargs = kwargs
- def execute(self, context: dict) -> None:
+ def execute(self, context: dict) -> File:
"""Write SQL table to csv/parquet on local/S3/GCS.
Infers SQL database type based on connection.
@@ -53,6 +53,7 @@
# Write file if overwrite == True or if file doesn't exist.
if self.if_exists == "replace" or not self.output_file.exists():
self.output_file.create_from_dataframe(df)
+ return self.output_file
else:
raise FileExistsError(f"{self.output_file.path} file already exists.")
@@ -66,7 +67,20 @@
) -> XComArg:
"""Convert SaveFile into a function. Returns XComArg.
- Returns an XComArg object.
+ Returns an XComArg object of type File which matches the output_file parameter.
+
+ This will allow users to perform further actions with the exported file.
+
+ e.g.
+
+ with sample_dag:
+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
+ exported_file = aql.export_file(
+ input_data=table,
+ output_file=File(path="/tmp/saved_df.csv"),
+ if_exists="replace",
+ )
+ res_df = aql.load_file(input_file=exported_file)
:param output_file: Path and conn_id
:param input_data: Input table / dataframe
|
{"golden_diff": "diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py\n--- a/src/astro/sql/operators/export_file.py\n+++ b/src/astro/sql/operators/export_file.py\n@@ -34,7 +34,7 @@\n self.if_exists = if_exists\n self.kwargs = kwargs\n \n- def execute(self, context: dict) -> None:\n+ def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n \n Infers SQL database type based on connection.\n@@ -53,6 +53,7 @@\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n+ return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n \n@@ -66,7 +67,20 @@\n ) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n \n- Returns an XComArg object.\n+ Returns an XComArg object of type File which matches the output_file parameter.\n+\n+ This will allow users to perform further actions with the exported file.\n+\n+ e.g.\n+\n+ with sample_dag:\n+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n+ exported_file = aql.export_file(\n+ input_data=table,\n+ output_file=File(path=\"/tmp/saved_df.csv\"),\n+ if_exists=\"replace\",\n+ )\n+ res_df = aql.load_file(input_file=exported_file)\n \n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n", "issue": "Change `export_file` to return `File` object\n**Context**\r\n\r\nIn order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.\r\n\r\n* Astro-SDK version: 0.9..1\r\n* Request by: @jlaneve\r\n* Analysed by @dimberman \r\n\r\n**Problem**\r\n\r\nAt the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:\r\n\r\n```\r\n output_file = File(path=\"/tmp/saved_df.csv\")\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n export = aql.export_file(\r\n input_data=table,\r\n output_file=output_file,\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=output_file)\r\n export >> res_df\r\n```\r\n\r\n**Desired behaviour**\r\n\r\n```\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n exported_file = aql.export_file(\r\n input_data=table,\r\n output_file=File(path=\"/tmp/saved_df.csv\"),\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=exported_file)\r\n```\r\n\r\n**Acceptance criteria**\r\n* Change `export_file` so it returns the `File` instance, as opposed to `None`\r\n\r\nSince there is no documentation about this task, we don't need to update the documentation for it. 
To create documentation for this feature should be part of another issue.\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> None:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object.\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}]}
| 1,694 | 395 |
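The change above leans on a general Airflow mechanism: whatever an operator's `execute` returns is pushed to XCom (with the default `do_xcom_push=True`), and that is the value a downstream `XComArg` resolves to. A toy sketch of the mechanism follows; the operator and field names are illustrative and not part of astro-sdk.

```python
from airflow.models import BaseOperator


class ExportToPath(BaseOperator):
    """Toy operator that 'exports' data and hands its output descriptor downstream."""

    def __init__(self, output_path: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.output_path = output_path

    def execute(self, context):
        # ... write the data to self.output_path here ...
        # Returning the descriptor instead of None is what makes `op.output`
        # usable as an input of downstream tasks, which is exactly what the
        # issue asks export_file to do with its File object.
        return self.output_path
```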
gh_patches_debug_25852
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-540
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DEP: Drop `inplace` option of `filter_in`
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [x] closes #522
- [ ] whatsnew entry
</issue>
<code>
[start of dtoolkit/accessor/dataframe/drop_inf.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 import numpy as np
6 import pandas as pd
7 from pandas.util._validators import validate_bool_kwarg
8
9 from dtoolkit.accessor._util import get_inf_range
10 from dtoolkit.accessor.dataframe import boolean # noqa
11 from dtoolkit.accessor.register import register_dataframe_method
12
13
14 if TYPE_CHECKING:
15 from dtoolkit._typing import IntOrStr
16
17
18 @register_dataframe_method
19 def drop_inf(
20 df: pd.DataFrame,
21 axis: IntOrStr = 0,
22 how: str = "any",
23 inf: str = "all",
24 subset: list[str] = None,
25 inplace: bool = False,
26 ) -> pd.DataFrame | None:
27 """
28 Remove ``inf`` values.
29
30 Parameters
31 ----------
32 axis : {0 or 'index', 1 or 'columns'}, default 0
33 Determine if rows or columns which contain ``inf`` values are
34 removed.
35
36 * 0, or 'index' : Drop rows which contain ``inf`` values.
37 * 1, or 'columns' : Drop columns which contain ``inf`` value.
38
39 how : {'any', 'all'}, default 'any'
40 Determine if row or column is removed from :obj:`~pandas.DataFrame`,
41 when we have at least one ``inf`` or all ``inf``.
42
43 * 'any' : If any ``inf`` values are present, drop that row or column.
44 * 'all' : If all values are ``inf``, drop that row or column.
45
46 inf : {'all', 'pos', 'neg'}, default 'all'
47 * 'all' : Remove ``inf`` and ``-inf``.
48 * 'pos' : Only remove ``inf``.
49 * 'neg' : Only remove ``-inf``.
50
51 subset : array-like, optional
52 Labels along other axis to consider, e.g. if you are dropping rows
53 these would be a list of columns to include.
54
55 inplace : bool, default False
56 If True, do operation inplace and return None.
57
58 Returns
59 -------
60 DataFrame or None
61 DataFrame with ``inf`` entries dropped from it or None if
62 ``inplace=True``.
63
64 See Also
65 --------
66 dtoolkit.accessor.series.drop_inf
67 :obj:`~pandas.Series` drops ``inf`` values.
68
69 Examples
70 --------
71 >>> import dtoolkit.accessor
72 >>> import pandas as pd
73 >>> import numpy as np
74 >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
75 ... "toy": [np.inf, 'Batmobile', 'Bullwhip'],
76 ... "born": [np.inf, pd.Timestamp("1940-04-25"),
77 ... -np.inf]})
78 >>> df
79 name toy born
80 0 Alfred inf inf
81 1 Batman Batmobile 1940-04-25 00:00:00
82 2 Catwoman Bullwhip -inf
83
84 Drop the rows where at least one element is inf and -inf.
85
86 >>> df.drop_inf()
87 name toy born
88 1 Batman Batmobile 1940-04-25 00:00:00
89
90 Drop the columns where at least one element is inf and -inf.
91
92 >>> df.drop_inf(axis='columns')
93 name
94 0 Alfred
95 1 Batman
96 2 Catwoman
97
98 Drop the rows where all elements are inf and -inf.
99
100 >>> df.drop_inf(how='all')
101 name toy born
102 0 Alfred inf inf
103 1 Batman Batmobile 1940-04-25 00:00:00
104 2 Catwoman Bullwhip -inf
105
106 Drop the rows where at least one element is -inf.
107
108 >>> df.drop_inf(inf='neg')
109 name toy born
110 0 Alfred inf inf
111 1 Batman Batmobile 1940-04-25 00:00:00
112
113 Define in which columns to look for inf and -inf values.
114
115 >>> df.drop_inf(subset=['name', 'toy'])
116 name toy born
117 1 Batman Batmobile 1940-04-25 00:00:00
118 2 Catwoman Bullwhip -inf
119
120 Keep the DataFrame with valid entries in the same variable.
121
122 >>> df.drop_inf(inplace=True)
123 >>> df
124 name toy born
125 1 Batman Batmobile 1940-04-25 00:00:00
126 """
127
128 inplace = validate_bool_kwarg(inplace, "inplace")
129
130 axis = df._get_axis_number(axis)
131 agg_axis = 1 - axis
132
133 agg_obj = df
134 if subset is not None:
135 ax = df._get_axis(agg_axis)
136 indices = ax.get_indexer_for(subset)
137 check = indices == -1
138 if check.any():
139 raise KeyError(list(np.compress(check, subset)))
140
141 agg_obj = df.take(indices, axis=agg_axis)
142
143 inf_range = get_inf_range(inf)
144 mask = agg_obj.isin(inf_range).boolean(how=how, axis=agg_axis)
145 result = df.loc(axis=axis)[~mask]
146
147 if not inplace:
148 return result
149
150 df._update_inplace(result)
151
[end of dtoolkit/accessor/dataframe/drop_inf.py]
[start of dtoolkit/accessor/series/drop_inf.py]
1 from __future__ import annotations
2
3 import pandas as pd
4 from pandas.util._validators import validate_bool_kwarg
5
6 from dtoolkit.accessor._util import get_inf_range
7 from dtoolkit.accessor.register import register_series_method
8
9
10 @register_series_method
11 def drop_inf(
12 s: pd.Series,
13 inf: str = "all",
14 inplace: bool = False,
15 ) -> pd.Series | None:
16 """
17 Remove ``inf`` values.
18
19 Parameters
20 ----------
21 inf : {'all', 'pos', 'neg'}, default 'all'
22
23 * 'all' : Remove ``inf`` and ``-inf``.
24 * 'pos' : Only remove ``inf``.
25 * 'neg' : Only remove ``-inf``.
26
27 inplace : bool, default False
28 If True, do operation inplace and return None.
29
30 Returns
31 -------
32 Series or None
33 Series with ``inf`` entries dropped from it or None if
34 ``inplace=True``.
35
36 See Also
37 --------
38 dtoolkit.accessor.dataframe.drop_inf
39 :obj:`~pandas.DataFrame` drops rows or columns which contain ``inf``
40 values.
41
42 Examples
43 --------
44 >>> import dtoolkit.accessor
45 >>> import pandas as pd
46 >>> import numpy as np
47 >>> s = pd.Series([1., 2., np.inf])
48 >>> s
49 0 1.0
50 1 2.0
51 2 inf
52 dtype: float64
53
54 Drop inf values from a Series.
55
56 >>> s.drop_inf()
57 0 1.0
58 1 2.0
59 dtype: float64
60
61 Keep the Series with valid entries in the same variable.
62
63 >>> s.drop_inf(inplace=True)
64 >>> s
65 0 1.0
66 1 2.0
67 dtype: float64
68 """
69
70 inplace = validate_bool_kwarg(inplace, "inplace")
71 inf_range = get_inf_range(inf)
72 mask = s.isin(inf_range)
73 result = s[~mask]
74
75 if not inplace:
76 return result
77
78 s._update_inplace(result)
79
[end of dtoolkit/accessor/series/drop_inf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dtoolkit/accessor/dataframe/drop_inf.py b/dtoolkit/accessor/dataframe/drop_inf.py
--- a/dtoolkit/accessor/dataframe/drop_inf.py
+++ b/dtoolkit/accessor/dataframe/drop_inf.py
@@ -9,6 +9,7 @@
from dtoolkit.accessor._util import get_inf_range
from dtoolkit.accessor.dataframe import boolean # noqa
from dtoolkit.accessor.register import register_dataframe_method
+from dtoolkit.util._decorator import deprecated_kwargs
if TYPE_CHECKING:
@@ -16,6 +17,13 @@
@register_dataframe_method
+@deprecated_kwargs(
+ "inplace",
+ message=(
+ "The keyword argument '{argument}' of '{func_name}' is deprecated and will "
+ "be removed in 0.0.17. (Warning added DToolKit 0.0.16)"
+ ),
+)
def drop_inf(
df: pd.DataFrame,
axis: IntOrStr = 0,
diff --git a/dtoolkit/accessor/series/drop_inf.py b/dtoolkit/accessor/series/drop_inf.py
--- a/dtoolkit/accessor/series/drop_inf.py
+++ b/dtoolkit/accessor/series/drop_inf.py
@@ -5,9 +5,17 @@
from dtoolkit.accessor._util import get_inf_range
from dtoolkit.accessor.register import register_series_method
+from dtoolkit.util._decorator import deprecated_kwargs
@register_series_method
+@deprecated_kwargs(
+ "inplace",
+ message=(
+ "The keyword argument '{argument}' of '{func_name}' is deprecated and will "
+ "be removed in 0.0.17. (Warning added DToolKit 0.0.16)"
+ ),
+)
def drop_inf(
s: pd.Series,
inf: str = "all",
|
{"golden_diff": "diff --git a/dtoolkit/accessor/dataframe/drop_inf.py b/dtoolkit/accessor/dataframe/drop_inf.py\n--- a/dtoolkit/accessor/dataframe/drop_inf.py\n+++ b/dtoolkit/accessor/dataframe/drop_inf.py\n@@ -9,6 +9,7 @@\n from dtoolkit.accessor._util import get_inf_range\n from dtoolkit.accessor.dataframe import boolean # noqa\n from dtoolkit.accessor.register import register_dataframe_method\n+from dtoolkit.util._decorator import deprecated_kwargs\n \n \n if TYPE_CHECKING:\n@@ -16,6 +17,13 @@\n \n \n @register_dataframe_method\n+@deprecated_kwargs(\n+ \"inplace\",\n+ message=(\n+ \"The keyword argument '{argument}' of '{func_name}' is deprecated and will \"\n+ \"be removed in 0.0.17. (Warning added DToolKit 0.0.16)\"\n+ ),\n+)\n def drop_inf(\n df: pd.DataFrame,\n axis: IntOrStr = 0,\ndiff --git a/dtoolkit/accessor/series/drop_inf.py b/dtoolkit/accessor/series/drop_inf.py\n--- a/dtoolkit/accessor/series/drop_inf.py\n+++ b/dtoolkit/accessor/series/drop_inf.py\n@@ -5,9 +5,17 @@\n \n from dtoolkit.accessor._util import get_inf_range\n from dtoolkit.accessor.register import register_series_method\n+from dtoolkit.util._decorator import deprecated_kwargs\n \n \n @register_series_method\n+@deprecated_kwargs(\n+ \"inplace\",\n+ message=(\n+ \"The keyword argument '{argument}' of '{func_name}' is deprecated and will \"\n+ \"be removed in 0.0.17. (Warning added DToolKit 0.0.16)\"\n+ ),\n+)\n def drop_inf(\n s: pd.Series,\n inf: str = \"all\",\n", "issue": "DEP: Drop `inplace` option of `filter_in`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #522\r\n- [ ] whatsnew entry\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_inf_range\nfrom dtoolkit.accessor.dataframe import boolean # noqa\nfrom dtoolkit.accessor.register import register_dataframe_method\n\n\nif TYPE_CHECKING:\n from dtoolkit._typing import IntOrStr\n\n\n@register_dataframe_method\ndef drop_inf(\n df: pd.DataFrame,\n axis: IntOrStr = 0,\n how: str = \"any\",\n inf: str = \"all\",\n subset: list[str] = None,\n inplace: bool = False,\n) -> pd.DataFrame | None:\n \"\"\"\n Remove ``inf`` values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain ``inf`` values are\n removed.\n\n * 0, or 'index' : Drop rows which contain ``inf`` values.\n * 1, or 'columns' : Drop columns which contain ``inf`` value.\n\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from :obj:`~pandas.DataFrame`,\n when we have at least one ``inf`` or all ``inf``.\n\n * 'any' : If any ``inf`` values are present, drop that row or column.\n * 'all' : If all values are ``inf``, drop that row or column.\n\n inf : {'all', 'pos', 
'neg'}, default 'all'\n * 'all' : Remove ``inf`` and ``-inf``.\n * 'pos' : Only remove ``inf``.\n * 'neg' : Only remove ``-inf``.\n\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame or None\n DataFrame with ``inf`` entries dropped from it or None if\n ``inplace=True``.\n\n See Also\n --------\n dtoolkit.accessor.series.drop_inf\n :obj:`~pandas.Series` drops ``inf`` values.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [np.inf, 'Batmobile', 'Bullwhip'],\n ... \"born\": [np.inf, pd.Timestamp(\"1940-04-25\"),\n ... -np.inf]})\n >>> df\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Drop the rows where at least one element is inf and -inf.\n\n >>> df.drop_inf()\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n\n Drop the columns where at least one element is inf and -inf.\n\n >>> df.drop_inf(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are inf and -inf.\n\n >>> df.drop_inf(how='all')\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Drop the rows where at least one element is -inf.\n\n >>> df.drop_inf(inf='neg')\n name toy born\n 0 Alfred inf inf\n 1 Batman Batmobile 1940-04-25 00:00:00\n\n Define in which columns to look for inf and -inf values.\n\n >>> df.drop_inf(subset=['name', 'toy'])\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n 2 Catwoman Bullwhip -inf\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.drop_inf(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25 00:00:00\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n\n axis = df._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = df\n if subset is not None:\n ax = df._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check, subset)))\n\n agg_obj = df.take(indices, axis=agg_axis)\n\n inf_range = get_inf_range(inf)\n mask = agg_obj.isin(inf_range).boolean(how=how, axis=agg_axis)\n result = df.loc(axis=axis)[~mask]\n\n if not inplace:\n return result\n\n df._update_inplace(result)\n", "path": "dtoolkit/accessor/dataframe/drop_inf.py"}, {"content": "from __future__ import annotations\n\nimport pandas as pd\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom dtoolkit.accessor._util import get_inf_range\nfrom dtoolkit.accessor.register import register_series_method\n\n\n@register_series_method\ndef drop_inf(\n s: pd.Series,\n inf: str = \"all\",\n inplace: bool = False,\n) -> pd.Series | None:\n \"\"\"\n Remove ``inf`` values.\n\n Parameters\n ----------\n inf : {'all', 'pos', 'neg'}, default 'all'\n\n * 'all' : Remove ``inf`` and ``-inf``.\n * 'pos' : Only remove ``inf``.\n * 'neg' : Only remove ``-inf``.\n\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n Series or None\n Series with ``inf`` entries dropped from it or None if\n ``inplace=True``.\n\n See Also\n --------\n dtoolkit.accessor.dataframe.drop_inf\n :obj:`~pandas.DataFrame` drops rows or columns which contain ``inf``\n 
values.\n\n Examples\n --------\n >>> import dtoolkit.accessor\n >>> import pandas as pd\n >>> import numpy as np\n >>> s = pd.Series([1., 2., np.inf])\n >>> s\n 0 1.0\n 1 2.0\n 2 inf\n dtype: float64\n\n Drop inf values from a Series.\n\n >>> s.drop_inf()\n 0 1.0\n 1 2.0\n dtype: float64\n\n Keep the Series with valid entries in the same variable.\n\n >>> s.drop_inf(inplace=True)\n >>> s\n 0 1.0\n 1 2.0\n dtype: float64\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n inf_range = get_inf_range(inf)\n mask = s.isin(inf_range)\n result = s[~mask]\n\n if not inplace:\n return result\n\n s._update_inplace(result)\n", "path": "dtoolkit/accessor/series/drop_inf.py"}]}
| 3,016 | 427 |
gh_patches_debug_39269
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-1019
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_file_stream support
node-telegram-bot-api has added a `getFileStream` method, which allows accessing the file stream without downloading it to a folder. It'll be useful. For example, changing a group photo from a photo uploaded by users.
</issue>
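To make the request concrete, here is a minimal usage sketch of the group-photo scenario, assuming an in-memory download method on `telegram.File` such as the `download_as_bytearray` added in the patch further down; the token, chat id and file id are placeholders, and the synchronous calls reflect the library as it was at the time of this issue.

```python
import io

from telegram import Bot

BOT_TOKEN = "123456:ABC-DEF"      # placeholder token, not a working credential
CHAT_ID = -1001234567890          # placeholder group chat id


def set_group_photo_from_upload(file_id: str) -> None:
    """Reuse a user-uploaded photo as the group photo without touching disk."""
    bot = Bot(token=BOT_TOKEN)
    tg_file = bot.get_file(file_id)            # resolves file_path on Telegram's servers
    buf = tg_file.download_as_bytearray()      # in-memory download (see patch below)
    bot.set_chat_photo(chat_id=CHAT_ID, photo=io.BytesIO(buf))
```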
<code>
[start of telegram/files/file.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains an object that represents a Telegram File."""
20 from os.path import basename
21
22 from future.backports.urllib import parse as urllib_parse
23
24 from telegram import TelegramObject
25
26
27 class File(TelegramObject):
28 """
29 This object represents a file ready to be downloaded. The file can be downloaded with
30 :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the
31 link expires, a new one can be requested by calling getFile.
32
33 Note:
34 Maximum file size to download is 20 MB
35
36 Attributes:
37 file_id (:obj:`str`): Unique identifier for this file.
38 file_size (:obj:`str`): Optional. File size.
39 file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.
40
41 Args:
42 file_id (:obj:`str`): Unique identifier for this file.
43 file_size (:obj:`int`, optional): Optional. File size, if known.
44 file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.
45 bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.
46 **kwargs (:obj:`dict`): Arbitrary keyword arguments.
47
48 """
49
50 def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):
51 # Required
52 self.file_id = str(file_id)
53
54 # Optionals
55 self.file_size = file_size
56 self.file_path = file_path
57
58 self.bot = bot
59
60 self._id_attrs = (self.file_id,)
61
62 @classmethod
63 def de_json(cls, data, bot):
64 if not data:
65 return None
66
67 return cls(bot=bot, **data)
68
69 def download(self, custom_path=None, out=None, timeout=None):
70 """
71 Download this file. By default, the file is saved in the current working directory with its
72 original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be
73 saved to that path instead. If :attr:`out` is defined, the file contents will be saved to
74 that object using the ``out.write`` method.
75
76 Note:
77 `custom_path` and `out` are mutually exclusive.
78
79 Args:
80 custom_path (:obj:`str`, optional): Custom path.
81 out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
82 applicable.
83 timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
84 the read timeout from the server (instead of the one specified during creation of
85 the connection pool).
86
87 Raises:
88 ValueError: If both ``custom_path`` and ``out`` are passed.
89
90 """
91 if custom_path is not None and out is not None:
92 raise ValueError('custom_path and out are mutually exclusive')
93
94 # Convert any UTF-8 char into a url encoded ASCII string.
95 sres = urllib_parse.urlsplit(self.file_path)
96 url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
97 sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
98
99 if out:
100 buf = self.bot.request.retrieve(url)
101 out.write(buf)
102
103 else:
104 if custom_path:
105 filename = custom_path
106 else:
107 filename = basename(self.file_path)
108
109 self.bot.request.download(url, filename, timeout=timeout)
110
[end of telegram/files/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/telegram/files/file.py b/telegram/files/file.py
--- a/telegram/files/file.py
+++ b/telegram/files/file.py
@@ -74,32 +74,34 @@
that object using the ``out.write`` method.
Note:
- `custom_path` and `out` are mutually exclusive.
+ :attr:`custom_path` and :attr:`out` are mutually exclusive.
Args:
custom_path (:obj:`str`, optional): Custom path.
- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
- applicable.
+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for
+ writing in binary mode, if applicable.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
+ Returns:
+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.
+ Otherwise, returns the filename downloaded to.
+
Raises:
- ValueError: If both ``custom_path`` and ``out`` are passed.
+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.
"""
if custom_path is not None and out is not None:
raise ValueError('custom_path and out are mutually exclusive')
# Convert any UTF-8 char into a url encoded ASCII string.
- sres = urllib_parse.urlsplit(self.file_path)
- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+ url = self._get_encoded_url()
if out:
buf = self.bot.request.retrieve(url)
out.write(buf)
-
+ return out
else:
if custom_path:
filename = custom_path
@@ -107,3 +109,27 @@
filename = basename(self.file_path)
self.bot.request.download(url, filename, timeout=timeout)
+ return filename
+
+ def _get_encoded_url(self):
+ """Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string."""
+ sres = urllib_parse.urlsplit(self.file_path)
+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(
+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+
+ def download_as_bytearray(self, buf=None):
+ """Download this file and return it as a bytearray.
+
+ Args:
+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.
+
+ Returns:
+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly
+ allocated :obj:`bytearray`.
+
+ """
+ if buf is None:
+ buf = bytearray()
+
+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))
+ return buf
|
{"golden_diff": "diff --git a/telegram/files/file.py b/telegram/files/file.py\n--- a/telegram/files/file.py\n+++ b/telegram/files/file.py\n@@ -74,32 +74,34 @@\n that object using the ``out.write`` method.\n \n Note:\n- `custom_path` and `out` are mutually exclusive.\n+ :attr:`custom_path` and :attr:`out` are mutually exclusive.\n \n Args:\n custom_path (:obj:`str`, optional): Custom path.\n- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if\n- applicable.\n+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for\n+ writing in binary mode, if applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n \n+ Returns:\n+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.\n+ Otherwise, returns the filename downloaded to.\n+\n Raises:\n- ValueError: If both ``custom_path`` and ``out`` are passed.\n+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.\n \n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n \n # Convert any UTF-8 char into a url encoded ASCII string.\n- sres = urllib_parse.urlsplit(self.file_path)\n- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+ url = self._get_encoded_url()\n \n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n-\n+ return out\n else:\n if custom_path:\n filename = custom_path\n@@ -107,3 +109,27 @@\n filename = basename(self.file_path)\n \n self.bot.request.download(url, filename, timeout=timeout)\n+ return filename\n+\n+ def _get_encoded_url(self):\n+ \"\"\"Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string.\"\"\"\n+ sres = urllib_parse.urlsplit(self.file_path)\n+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(\n+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+\n+ def download_as_bytearray(self, buf=None):\n+ \"\"\"Download this file and return it as a bytearray.\n+\n+ Args:\n+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.\n+\n+ Returns:\n+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly\n+ allocated :obj:`bytearray`.\n+\n+ \"\"\"\n+ if buf is None:\n+ buf = bytearray()\n+\n+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))\n+ return buf\n", "issue": "get_file_stream support\nnode-telegram-bot-api has added `getFileStream` method , which allows accessing the file stream without downloading it to a folder. It'll be useful.For example, changing a group photo from a photo uploaded by users.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram File.\"\"\"\nfrom os.path import basename\n\nfrom future.backports.urllib import parse as urllib_parse\n\nfrom telegram import TelegramObject\n\n\nclass File(TelegramObject):\n \"\"\"\n This object represents a file ready to be downloaded. The file can be downloaded with\n :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the\n link expires, a new one can be requested by calling getFile.\n\n Note:\n Maximum file size to download is 20 MB\n\n Attributes:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`str`): Optional. File size.\n file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.\n\n Args:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`int`, optional): Optional. File size, if known.\n file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.\n bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.\n **kwargs (:obj:`dict`): Arbitrary keyword arguments.\n\n \"\"\"\n\n def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):\n # Required\n self.file_id = str(file_id)\n\n # Optionals\n self.file_size = file_size\n self.file_path = file_path\n\n self.bot = bot\n\n self._id_attrs = (self.file_id,)\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n return cls(bot=bot, **data)\n\n def download(self, custom_path=None, out=None, timeout=None):\n \"\"\"\n Download this file. By default, the file is saved in the current working directory with its\n original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be\n saved to that path instead. If :attr:`out` is defined, the file contents will be saved to\n that object using the ``out.write`` method.\n\n Note:\n `custom_path` and `out` are mutually exclusive.\n\n Args:\n custom_path (:obj:`str`, optional): Custom path.\n out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if\n applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n\n Raises:\n ValueError: If both ``custom_path`` and ``out`` are passed.\n\n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n\n # Convert any UTF-8 char into a url encoded ASCII string.\n sres = urllib_parse.urlsplit(self.file_path)\n url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n\n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n\n else:\n if custom_path:\n filename = custom_path\n else:\n filename = basename(self.file_path)\n\n self.bot.request.download(url, filename, timeout=timeout)\n", "path": "telegram/files/file.py"}]}
| 1,766 | 705 |
gh_patches_debug_32392
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-2387
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`set_qiskit_logger()` is hard to use
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
A utility method [`set_qiskit_logger()`](https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.
It has room for improvement, namely:
- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file or module to log.
- One cannot customize the LOG_LEVEL.
</issue>
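To illustrate what the two bullet points above could look like in practice, here is a small sketch built only on the standard `logging` module; the parameter names and the idea of passing a sub-module are assumptions for illustration, not the signature the maintainers eventually chose.

```python
import logging


def set_qiskit_logger(level: int = logging.INFO, module: str = "qiskit") -> logging.Logger:
    """Attach a console handler to one qiskit sub-logger at a chosen level."""
    logger = logging.getLogger(module)       # e.g. "qiskit.transpiler" instead of everything
    handler = logging.StreamHandler()        # stderr by default
    handler.setFormatter(
        logging.Formatter("%(asctime)s:%(name)s:%(levelname)s: %(message)s")
    )
    logger.addHandler(handler)
    logger.setLevel(level)                   # the LOG_LEVEL is now caller-controlled
    return logger


# Only the transpiler emits DEBUG records; the rest of qiskit stays quiet.
set_qiskit_logger(level=logging.DEBUG, module="qiskit.transpiler")
```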
<code>
[start of qiskit/tools/logging.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Utilities for logging."""
16
17 import logging
18 from logging.config import dictConfig
19
20
21 class SimpleInfoFormatter(logging.Formatter):
22 """Custom Formatter that uses a simple format for INFO."""
23 _style_info = logging._STYLES['%'][0]('%(message)s')
24
25 def formatMessage(self, record):
26 if record.levelno == logging.INFO:
27 return self._style_info.format(record)
28 return logging.Formatter.formatMessage(self, record)
29
30
31 QISKIT_LOGGING_CONFIG = {
32 'version': 1,
33 'disable_existing_loggers': False,
34 'formatters': {
35 'f': {
36 '()': SimpleInfoFormatter,
37 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
38 },
39 },
40 'handlers': {
41 'h': {
42 'class': 'logging.StreamHandler',
43 'formatter': 'f'
44 }
45 },
46 'loggers': {
47 'qiskit': {
48 'handlers': ['h'],
49 'level': logging.INFO,
50 },
51 }
52 }
53
54
55 def set_qiskit_logger():
56 """Update 'qiskit' logger configuration using a SDK default one.
57
58 Update the configuration of the 'qiskit' logger using the default SDK
59 configuration provided by `QISKIT_LOGGING_CONFIG`:
60
61 * console logging using a custom format for levels != INFO.
62 * console logging with simple format for level INFO.
63 * set logger level to INFO.
64
65 Warning:
66 This function modifies the configuration of the standard logging system
67 for the 'qiskit.*' loggers, and might interfere with custom logger
68 configurations.
69 """
70 dictConfig(QISKIT_LOGGING_CONFIG)
71
72
73 def unset_qiskit_logger():
74 """Remove the handlers for the 'qiskit' logger."""
75 qiskit_logger = logging.getLogger('qiskit')
76 for handler in qiskit_logger.handlers:
77 qiskit_logger.removeHandler(handler)
78
[end of qiskit/tools/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py
deleted file mode 100644
--- a/qiskit/tools/logging.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Qiskit.
-#
-# (C) Copyright IBM 2017.
-#
-# This code is licensed under the Apache License, Version 2.0. You may
-# obtain a copy of this license in the LICENSE.txt file in the root directory
-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
-#
-# Any modifications or derivative works of this code must retain this
-# copyright notice, and modified files need to carry a notice indicating
-# that they have been altered from the originals.
-
-"""Utilities for logging."""
-
-import logging
-from logging.config import dictConfig
-
-
-class SimpleInfoFormatter(logging.Formatter):
- """Custom Formatter that uses a simple format for INFO."""
- _style_info = logging._STYLES['%'][0]('%(message)s')
-
- def formatMessage(self, record):
- if record.levelno == logging.INFO:
- return self._style_info.format(record)
- return logging.Formatter.formatMessage(self, record)
-
-
-QISKIT_LOGGING_CONFIG = {
- 'version': 1,
- 'disable_existing_loggers': False,
- 'formatters': {
- 'f': {
- '()': SimpleInfoFormatter,
- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
- },
- },
- 'handlers': {
- 'h': {
- 'class': 'logging.StreamHandler',
- 'formatter': 'f'
- }
- },
- 'loggers': {
- 'qiskit': {
- 'handlers': ['h'],
- 'level': logging.INFO,
- },
- }
-}
-
-
-def set_qiskit_logger():
- """Update 'qiskit' logger configuration using a SDK default one.
-
- Update the configuration of the 'qiskit' logger using the default SDK
- configuration provided by `QISKIT_LOGGING_CONFIG`:
-
- * console logging using a custom format for levels != INFO.
- * console logging with simple format for level INFO.
- * set logger level to INFO.
-
- Warning:
- This function modifies the configuration of the standard logging system
- for the 'qiskit.*' loggers, and might interfere with custom logger
- configurations.
- """
- dictConfig(QISKIT_LOGGING_CONFIG)
-
-
-def unset_qiskit_logger():
- """Remove the handlers for the 'qiskit' logger."""
- qiskit_logger = logging.getLogger('qiskit')
- for handler in qiskit_logger.handlers:
- qiskit_logger.removeHandler(handler)
|
{"golden_diff": "diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py\ndeleted file mode 100644\n--- a/qiskit/tools/logging.py\n+++ /dev/null\n@@ -1,77 +0,0 @@\n-# -*- coding: utf-8 -*-\n-\n-# This code is part of Qiskit.\n-#\n-# (C) Copyright IBM 2017.\n-#\n-# This code is licensed under the Apache License, Version 2.0. You may\n-# obtain a copy of this license in the LICENSE.txt file in the root directory\n-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n-#\n-# Any modifications or derivative works of this code must retain this\n-# copyright notice, and modified files need to carry a notice indicating\n-# that they have been altered from the originals.\n-\n-\"\"\"Utilities for logging.\"\"\"\n-\n-import logging\n-from logging.config import dictConfig\n-\n-\n-class SimpleInfoFormatter(logging.Formatter):\n- \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n- _style_info = logging._STYLES['%'][0]('%(message)s')\n-\n- def formatMessage(self, record):\n- if record.levelno == logging.INFO:\n- return self._style_info.format(record)\n- return logging.Formatter.formatMessage(self, record)\n-\n-\n-QISKIT_LOGGING_CONFIG = {\n- 'version': 1,\n- 'disable_existing_loggers': False,\n- 'formatters': {\n- 'f': {\n- '()': SimpleInfoFormatter,\n- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n- },\n- },\n- 'handlers': {\n- 'h': {\n- 'class': 'logging.StreamHandler',\n- 'formatter': 'f'\n- }\n- },\n- 'loggers': {\n- 'qiskit': {\n- 'handlers': ['h'],\n- 'level': logging.INFO,\n- },\n- }\n-}\n-\n-\n-def set_qiskit_logger():\n- \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n-\n- Update the configuration of the 'qiskit' logger using the default SDK\n- configuration provided by `QISKIT_LOGGING_CONFIG`:\n-\n- * console logging using a custom format for levels != INFO.\n- * console logging with simple format for level INFO.\n- * set logger level to INFO.\n-\n- Warning:\n- This function modifies the configuration of the standard logging system\n- for the 'qiskit.*' loggers, and might interfere with custom logger\n- configurations.\n- \"\"\"\n- dictConfig(QISKIT_LOGGING_CONFIG)\n-\n-\n-def unset_qiskit_logger():\n- \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n- qiskit_logger = logging.getLogger('qiskit')\n- for handler in qiskit_logger.handlers:\n- qiskit_logger.removeHandler(handler)\n", "issue": "`set_qiskit_logger()` is hard to use\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nA utility method `[set_qiskit_logger()]`(https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.\r\n\r\nIt has room for improvement, namely:\r\n- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file to or module to log.\r\n- One cannot customize the LOG_LEVEL.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Utilities for logging.\"\"\"\n\nimport logging\nfrom logging.config import dictConfig\n\n\nclass SimpleInfoFormatter(logging.Formatter):\n \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n _style_info = logging._STYLES['%'][0]('%(message)s')\n\n def formatMessage(self, record):\n if record.levelno == logging.INFO:\n return self._style_info.format(record)\n return logging.Formatter.formatMessage(self, record)\n\n\nQISKIT_LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'f': {\n '()': SimpleInfoFormatter,\n 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n },\n },\n 'handlers': {\n 'h': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'f'\n }\n },\n 'loggers': {\n 'qiskit': {\n 'handlers': ['h'],\n 'level': logging.INFO,\n },\n }\n}\n\n\ndef set_qiskit_logger():\n \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n\n Update the configuration of the 'qiskit' logger using the default SDK\n configuration provided by `QISKIT_LOGGING_CONFIG`:\n\n * console logging using a custom format for levels != INFO.\n * console logging with simple format for level INFO.\n * set logger level to INFO.\n\n Warning:\n This function modifies the configuration of the standard logging system\n for the 'qiskit.*' loggers, and might interfere with custom logger\n configurations.\n \"\"\"\n dictConfig(QISKIT_LOGGING_CONFIG)\n\n\ndef unset_qiskit_logger():\n \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n qiskit_logger = logging.getLogger('qiskit')\n for handler in qiskit_logger.handlers:\n qiskit_logger.removeHandler(handler)\n", "path": "qiskit/tools/logging.py"}]}
| 1,419 | 656 |
gh_patches_debug_12334
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-877
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E3002, E3003, E3012 - AWS::SSM::MaintenanceWindowTarget errors
cfn-lint version: 0.19.1
cfn-lint -u has been run
I am unable to get a clean lint on an SSM template file that seems alright and that has loaded properly in AWS. Would really appreciate any assistance.
The documentation is here on MaintenanceWindowTarget:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ssm-maintenancewindowtarget.html
And the associated targets:
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtarget-targets.html
The CloudFormation below should be syntactically correct:
```
BaselinePatchDailySSMMaintenanceWindowTarget:
Type: AWS::SSM::MaintenanceWindowTarget
Properties:
Name: BaselinePatchDailyTarget
Description: Systems with Tag Key=Patch,Value=Daily.
WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow
ResourceType: INSTANCE
Targets:
- Key: tag:Patch
Values:
- Daily
- daily
```
Results in the following cfn-lint errors:
```
E0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
E0002 Unknown exception while processing rule E3003: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
E3012 Property Resources/BaselinePatchDailySSMMaintenanceWindowTarget/Properties/Targets/0 should be of type String
BaselineSSMConfig.yaml:292:9
```
Edited file as follows to attempt to make target a string:
```
BaselinePatchDailySSMMaintenanceWindowTarget:
Type: AWS::SSM::MaintenanceWindowTarget
Properties:
Name: BaselinePatchDailyTarget
Description: Systems with Tag Key=Patch,Value=Daily.
WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow
ResourceType: INSTANCE
Targets: Key=tag:Patch,Values=Daily,daily
```
Results in the following cfn-lint error:
```
E3002 Property Targets should be of type List for resource BaselinePatchDailySSMMaintenanceWindowTarget
BaselineSSMConfig.yaml:291:7
```
Attempting to make the string a list:
```
BaselinePatchDailySSMMaintenanceWindowTarget:
Type: AWS::SSM::MaintenanceWindowTarget
Properties:
Name: BaselinePatchDailyTarget
Description: Systems with Tag Key=Patch,Value=Daily.
WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow
ResourceType: INSTANCE
Targets:
- Key=tag:Patch,Values=Daily,daily
```
Results in the following errors:
```
E0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
E0002 Unknown exception while processing rule E3003: 'AWS::SSM::MaintenanceWindowTarget.Target'
BaselineSSMConfig.yaml:1:1
```
Thanks!
</issue>
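The common thread in the errors above is that each entry of `Targets` is itself an object (a `Key` plus a list of `Values`), so a strict primitive-type check receives a `dict` where it expects a scalar. The stand-alone sketch below mimics that situation with invented helper names; it does not import cfn-lint.

```python
# One item of the Targets list from the first template above.
target_entry = {"Key": "tag:Patch", "Values": ["Daily", "daily"]}


def strict_string_check(value) -> bool:
    """A 'must be a String' check with no escape hatch: dicts are flagged."""
    return isinstance(value, str)


def tolerant_string_check(value, item_may_be_object: bool = True) -> bool:
    """Skip the primitive check when the spec also allows an object here."""
    if item_may_be_object and isinstance(value, dict):
        return True          # leave validation to the object-level rules instead
    return isinstance(value, str)


print(strict_string_check(target_entry))    # False -> the spurious E3012 above
print(tolerant_string_check(target_entry))  # True  -> no false positive
```

This is the same direction the patch at the end of this record takes: when the list item may be an object and the value is a `dict`, the primitive check is skipped.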
<code>
[start of src/cfnlint/rules/resources/properties/ValuePrimitiveType.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import sys
18 import six
19 from cfnlint import CloudFormationLintRule
20 from cfnlint import RuleMatch
21 import cfnlint.helpers
22
23
24 class ValuePrimitiveType(CloudFormationLintRule):
25 """Check if Resource PrimitiveTypes are correct"""
26 id = 'E3012'
27 shortdesc = 'Check resource properties values'
28 description = 'Checks resource property values with Primitive Types for ' \
29 'values that match those types.'
30 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#valueprimitivetype'
31 tags = ['resources']
32
33 def __init__(self):
34 """Init"""
35 super(ValuePrimitiveType, self).__init__()
36 self.resource_specs = []
37 self.property_specs = []
38 self.config_definition = {
39 'strict': {
40 'default': True,
41 'type': 'boolean'
42 }
43 }
44 self.configure()
45
46 def initialize(self, cfn):
47 """Initialize the rule"""
48 specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0])
49 self.property_specs = specs.get('PropertyTypes')
50 self.resource_specs = specs.get('ResourceTypes')
51 for resource_spec in self.resource_specs:
52 self.resource_property_types.append(resource_spec)
53 for property_spec in self.property_specs:
54 self.resource_sub_property_types.append(property_spec)
55
56 def _value_check(self, value, path, item_type, extra_args):
57 """ Checks non strict """
58 matches = []
59 if not self.config['strict']:
60 try:
61 if item_type in ['String']:
62 str(value)
63 elif item_type in ['Boolean']:
64 if value not in ['True', 'true', 'False', 'false']:
65 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
66 matches.append(RuleMatch(path, message, **extra_args))
67 elif item_type in ['Integer', 'Long', 'Double']:
68 if isinstance(value, bool):
69 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
70 matches.append(RuleMatch(path, message, **extra_args))
71 elif item_type in ['Integer']:
72 int(value)
73 elif item_type in ['Long']:
74 # Some times python will strip the decimals when doing a conversion
75 if isinstance(value, float):
76 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
77 matches.append(RuleMatch(path, message, **extra_args))
78 if sys.version_info < (3,):
79 long(value) # pylint: disable=undefined-variable
80 else:
81 int(value)
82 else: # has to be a Double
83 float(value)
84 except Exception: # pylint: disable=W0703
85 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
86 matches.append(RuleMatch(path, message, **extra_args))
87 else:
88 message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)
89 matches.append(RuleMatch(path, message, **extra_args))
90
91 return matches
92
93 def check_primitive_type(self, value, item_type, path):
94 """Chec item type"""
95 matches = []
96
97 if isinstance(value, dict) and item_type == 'Json':
98 return matches
99 if item_type in ['String']:
100 if not isinstance(value, (six.string_types)):
101 extra_args = {'actual_type': type(value).__name__, 'expected_type': str.__name__}
102 matches.extend(self._value_check(value, path, item_type, extra_args))
103 elif item_type in ['Boolean']:
104 if not isinstance(value, (bool)):
105 extra_args = {'actual_type': type(value).__name__, 'expected_type': bool.__name__}
106 matches.extend(self._value_check(value, path, item_type, extra_args))
107 elif item_type in ['Double']:
108 if not isinstance(value, (float, int)):
109 extra_args = {'actual_type': type(value).__name__, 'expected_type': [float.__name__, int.__name__]}
110 matches.extend(self._value_check(value, path, item_type, extra_args))
111 elif item_type in ['Integer']:
112 if not isinstance(value, (int)):
113 extra_args = {'actual_type': type(value).__name__, 'expected_type': int.__name__}
114 matches.extend(self._value_check(value, path, item_type, extra_args))
115 elif item_type in ['Long']:
116 if sys.version_info < (3,):
117 integer_types = (int, long,) # pylint: disable=undefined-variable
118 else:
119 integer_types = (int,)
120 if not isinstance(value, integer_types):
121 extra_args = {'actual_type': type(value).__name__, 'expected_type': ' or '.join([x.__name__ for x in integer_types])}
122 matches.extend(self._value_check(value, path, item_type, extra_args))
123 elif isinstance(value, list):
124 message = 'Property should be of type %s at %s' % (item_type, '/'.join(map(str, path)))
125 extra_args = {'actual_type': type(value).__name__, 'expected_type': list.__name__}
126 matches.append(RuleMatch(path, message, **extra_args))
127
128 return matches
129
130 def check_value(self, value, path, **kwargs):
131 """Check Value"""
132 matches = []
133 primitive_type = kwargs.get('primitive_type', {})
134 item_type = kwargs.get('item_type', {})
135 if item_type in ['Map']:
136 if isinstance(value, dict):
137 for map_key, map_value in value.items():
138 if not isinstance(map_value, dict):
139 matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))
140 else:
141 matches.extend(self.check_primitive_type(value, primitive_type, path))
142
143 return matches
144
145 def check(self, cfn, properties, specs, path):
146 """Check itself"""
147 matches = []
148
149 for prop in properties:
150 if prop in specs:
151 primitive_type = specs.get(prop).get('PrimitiveType')
152 if not primitive_type:
153 primitive_type = specs.get(prop).get('PrimitiveItemType')
154 if specs.get(prop).get('Type') in ['List', 'Map']:
155 item_type = specs.get(prop).get('Type')
156 else:
157 item_type = None
158 if primitive_type:
159 matches.extend(
160 cfn.check_value(
161 properties, prop, path,
162 check_value=self.check_value,
163 primitive_type=primitive_type,
164 item_type=item_type
165 )
166 )
167
168 return matches
169
170 def match_resource_sub_properties(self, properties, property_type, path, cfn):
171 """Match for sub properties"""
172 matches = []
173
174 if self.property_specs.get(property_type, {}).get('Properties'):
175 property_specs = self.property_specs.get(property_type, {}).get('Properties', {})
176 matches.extend(self.check(cfn, properties, property_specs, path))
177
178 return matches
179
180 def match_resource_properties(self, properties, resource_type, path, cfn):
181 """Check CloudFormation Properties"""
182 matches = []
183 resource_specs = self.resource_specs.get(resource_type, {}).get('Properties', {})
184 matches.extend(self.check(cfn, properties, resource_specs, path))
185
186 return matches
187
[end of src/cfnlint/rules/resources/properties/ValuePrimitiveType.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py
--- a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py
+++ b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py
@@ -138,6 +138,10 @@
if not isinstance(map_value, dict):
matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))
else:
+ # some properties support primitive types and objects
+ # skip in the case it could be an object and the value is a object
+ if item_type and isinstance(value, dict):
+ return matches
matches.extend(self.check_primitive_type(value, primitive_type, path))
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n--- a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n+++ b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n@@ -138,6 +138,10 @@\n if not isinstance(map_value, dict):\n matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))\n else:\n+ # some properties support primitive types and objects\n+ # skip in the case it could be an object and the value is a object\n+ if item_type and isinstance(value, dict):\n+ return matches\n matches.extend(self.check_primitive_type(value, primitive_type, path))\n \n return matches\n", "issue": "E3002, E3003, E3012 - AWS::SSM::MaintenanceWindowTarget errors\ncfn-lint version: 0.19.1\r\n\r\ncfn-lint -u has been run\r\n\r\nI am unable to to get a clean lint on a SSM template file that seems alright and that has loaded properly in AWS. Would really appreciate any assistance. \r\n\r\nThe documentation is here on MaintenanceWindowTarget:\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ssm-maintenancewindowtarget.html\r\n\r\nAnd the associated targets:\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ssm-maintenancewindowtarget-targets.html\r\n\r\nThe below cf should be syntactically correct..... \r\n\r\n```\r\n BaselinePatchDailySSMMaintenanceWindowTarget:\r\n Type: AWS::SSM::MaintenanceWindowTarget\r\n Properties:\r\n Name: BaselinePatchDailyTarget\r\n Description: Systems with Tag Key=Patch,Value=Daily.\r\n WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow\r\n ResourceType: INSTANCE\r\n Targets:\r\n - Key: tag:Patch\r\n Values:\r\n - Daily\r\n - daily\r\n```\r\nResults in the following cfn-lint errors:\r\n\r\n```\r\nE0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n\r\nE0002 Unknown exception while processing rule E3003: 'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n\r\nE3012 Property Resources/BaselinePatchDailySSMMaintenanceWindowTarget/Properties/Targets/0 should be of type String\r\nBaselineSSMConfig.yaml:292:9\r\n```\r\nEdited file as follows to attempt to make target a string:\r\n\r\n```\r\n BaselinePatchDailySSMMaintenanceWindowTarget:\r\n Type: AWS::SSM::MaintenanceWindowTarget\r\n Properties:\r\n Name: BaselinePatchDailyTarget\r\n Description: Systems with Tag Key=Patch,Value=Daily.\r\n WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow\r\n ResourceType: INSTANCE\r\n Targets: Key=tag:Patch,Values=Daily,daily\r\n```\r\nResults in the following cfn-lint error:\r\n\r\n```\r\nE3002 Property Targets should be of type List for resource BaselinePatchDailySSMMaintenanceWindowTarget\r\nBaselineSSMConfig.yaml:291:7\r\n```\r\nAttempting to make the string a list:\r\n```\r\n BaselinePatchDailySSMMaintenanceWindowTarget:\r\n Type: AWS::SSM::MaintenanceWindowTarget\r\n Properties:\r\n Name: BaselinePatchDailyTarget\r\n Description: Systems with Tag Key=Patch,Value=Daily.\r\n WindowId: !Ref BaselinePatchDailySSMMaintenanceWindow\r\n ResourceType: INSTANCE\r\n Targets:\r\n - Key=tag:Patch,Values=Daily,daily\r\n```\r\nResults in the following errors:\r\n```\r\nE0002 Unknown exception while processing rule E3002: 'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n\r\nE0002 Unknown exception while processing rule E3003: 
'AWS::SSM::MaintenanceWindowTarget.Target'\r\nBaselineSSMConfig.yaml:1:1\r\n```\r\n\r\nThanks!\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass ValuePrimitiveType(CloudFormationLintRule):\n \"\"\"Check if Resource PrimitiveTypes are correct\"\"\"\n id = 'E3012'\n shortdesc = 'Check resource properties values'\n description = 'Checks resource property values with Primitive Types for ' \\\n 'values that match those types.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#valueprimitivetype'\n tags = ['resources']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(ValuePrimitiveType, self).__init__()\n self.resource_specs = []\n self.property_specs = []\n self.config_definition = {\n 'strict': {\n 'default': True,\n 'type': 'boolean'\n }\n }\n self.configure()\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0])\n self.property_specs = specs.get('PropertyTypes')\n self.resource_specs = specs.get('ResourceTypes')\n for resource_spec in self.resource_specs:\n self.resource_property_types.append(resource_spec)\n for property_spec in self.property_specs:\n self.resource_sub_property_types.append(property_spec)\n\n def _value_check(self, value, path, item_type, extra_args):\n \"\"\" Checks non strict \"\"\"\n matches = []\n if not self.config['strict']:\n try:\n if item_type in ['String']:\n str(value)\n elif item_type in ['Boolean']:\n if value not in ['True', 'true', 'False', 'false']:\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer', 'Long', 'Double']:\n if isinstance(value, bool):\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer']:\n int(value)\n elif item_type in ['Long']:\n # Some times python will strip the decimals when doing a conversion\n if isinstance(value, float):\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n if sys.version_info < (3,):\n long(value) # pylint: disable=undefined-variable\n else:\n int(value)\n else: # has to be a Double\n float(value)\n except Exception: # pylint: disable=W0703\n message = 'Property %s 
should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n else:\n message = 'Property %s should be of type %s' % ('/'.join(map(str, path)), item_type)\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_primitive_type(self, value, item_type, path):\n \"\"\"Chec item type\"\"\"\n matches = []\n\n if isinstance(value, dict) and item_type == 'Json':\n return matches\n if item_type in ['String']:\n if not isinstance(value, (six.string_types)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': str.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Boolean']:\n if not isinstance(value, (bool)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': bool.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Double']:\n if not isinstance(value, (float, int)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': [float.__name__, int.__name__]}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Integer']:\n if not isinstance(value, (int)):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': int.__name__}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif item_type in ['Long']:\n if sys.version_info < (3,):\n integer_types = (int, long,) # pylint: disable=undefined-variable\n else:\n integer_types = (int,)\n if not isinstance(value, integer_types):\n extra_args = {'actual_type': type(value).__name__, 'expected_type': ' or '.join([x.__name__ for x in integer_types])}\n matches.extend(self._value_check(value, path, item_type, extra_args))\n elif isinstance(value, list):\n message = 'Property should be of type %s at %s' % (item_type, '/'.join(map(str, path)))\n extra_args = {'actual_type': type(value).__name__, 'expected_type': list.__name__}\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_value(self, value, path, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n primitive_type = kwargs.get('primitive_type', {})\n item_type = kwargs.get('item_type', {})\n if item_type in ['Map']:\n if isinstance(value, dict):\n for map_key, map_value in value.items():\n if not isinstance(map_value, dict):\n matches.extend(self.check_primitive_type(map_value, primitive_type, path + [map_key]))\n else:\n matches.extend(self.check_primitive_type(value, primitive_type, path))\n\n return matches\n\n def check(self, cfn, properties, specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n\n for prop in properties:\n if prop in specs:\n primitive_type = specs.get(prop).get('PrimitiveType')\n if not primitive_type:\n primitive_type = specs.get(prop).get('PrimitiveItemType')\n if specs.get(prop).get('Type') in ['List', 'Map']:\n item_type = specs.get(prop).get('Type')\n else:\n item_type = None\n if primitive_type:\n matches.extend(\n cfn.check_value(\n properties, prop, path,\n check_value=self.check_value,\n primitive_type=primitive_type,\n item_type=item_type\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n if self.property_specs.get(property_type, {}).get('Properties'):\n property_specs = self.property_specs.get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, property_specs, path))\n\n 
return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n resource_specs = self.resource_specs.get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ValuePrimitiveType.py"}]}
| 3,515 | 175 |
gh_patches_debug_6305
|
rasdani/github-patches
|
git_diff
|
scipy__scipy-5920
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"master" fails to build with MKL
Recently, when I try to build the "master" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:
``` .Python
ValueError: list.remove(x): x not in list
```
The error traces to this block of code <a href="https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32">here</a>, for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.
I don't know what happened recently that caused this setup to break (it has never happened before until now), but is anyone else getting this issue?
Setup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL
</issue>
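Independent of how the build scripts were eventually changed (the patch at the end of this record simply drops the removal), the crash itself is an unguarded `list.remove`. A defensive variant is sketched below against a stub source list; the file names are placeholders standing in for the real `glob` result.

```python
from os.path import join

superlu_src = join("SuperLU", "SRC")

# Stub for glob.glob(join(superlu_src, '*.c')); note that lsame.c is absent,
# which is exactly what made the original setup.py raise ValueError.
sources = [join(superlu_src, "dgssv.c"), join(superlu_src, "util.c")]

lsame = join(superlu_src, "lsame.c")
if lsame in sources:        # guard the removal instead of assuming the file is there
    sources.remove(lsame)

print(sources)              # list unchanged, no ValueError
```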
<code>
[start of scipy/sparse/linalg/dsolve/setup.py]
1 #!/usr/bin/env python
2 from __future__ import division, print_function, absolute_import
3
4 from os.path import join, dirname
5 import sys
6 import os
7 import glob
8
9
10 def configuration(parent_package='',top_path=None):
11 from numpy.distutils.misc_util import Configuration
12 from numpy.distutils.system_info import get_info
13 from scipy._build_utils import get_sgemv_fix
14 from scipy._build_utils import numpy_nodepr_api
15
16 config = Configuration('dsolve',parent_package,top_path)
17 config.add_data_dir('tests')
18
19 lapack_opt = get_info('lapack_opt',notfound_action=2)
20 if sys.platform == 'win32':
21 superlu_defs = [('NO_TIMER',1)]
22 else:
23 superlu_defs = []
24 superlu_defs.append(('USE_VENDOR_BLAS',1))
25
26 superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
27
28 sources = list(glob.glob(join(superlu_src, '*.c')))
29 headers = list(glob.glob(join(superlu_src, '*.h')))
30 if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
31 # when using MSVC + MKL, lsame is already in MKL
32 sources.remove(join(superlu_src, 'lsame.c'))
33
34 config.add_library('superlu_src',
35 sources=sources,
36 macros=superlu_defs,
37 include_dirs=[superlu_src],
38 )
39
40 # Extension
41 ext_sources = ['_superlumodule.c',
42 '_superlu_utils.c',
43 '_superluobject.c']
44 ext_sources += get_sgemv_fix(lapack_opt)
45
46 config.add_extension('_superlu',
47 sources=ext_sources,
48 libraries=['superlu_src'],
49 depends=(sources + headers),
50 extra_info=lapack_opt,
51 **numpy_nodepr_api
52 )
53
54 return config
55
56 if __name__ == '__main__':
57 from numpy.distutils.core import setup
58 setup(**configuration(top_path='').todict())
59
[end of scipy/sparse/linalg/dsolve/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py
--- a/scipy/sparse/linalg/dsolve/setup.py
+++ b/scipy/sparse/linalg/dsolve/setup.py
@@ -27,9 +27,6 @@
sources = list(glob.glob(join(superlu_src, '*.c')))
headers = list(glob.glob(join(superlu_src, '*.h')))
- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
- # when using MSVC + MKL, lsame is already in MKL
- sources.remove(join(superlu_src, 'lsame.c'))
config.add_library('superlu_src',
sources=sources,
|
{"golden_diff": "diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py\n--- a/scipy/sparse/linalg/dsolve/setup.py\n+++ b/scipy/sparse/linalg/dsolve/setup.py\n@@ -27,9 +27,6 @@\n \n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n- # when using MSVC + MKL, lsame is already in MKL\n- sources.remove(join(superlu_src, 'lsame.c'))\n \n config.add_library('superlu_src',\n sources=sources,\n", "issue": "\"master\" fails to build with MKL\nRecently, when I try to build the \"master\" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:\n\n``` .Python\nValueError: list.remove(x): x not in list\n```\n\nThe error traces to this block of code <a href=\"https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32\">here</a>, for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.\n\nI don't know what happened recently that caused this setup to break (it has never happened before until now), but is anyone else getting this issue?\n\nSetup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\nfrom os.path import join, dirname\nimport sys\nimport os\nimport glob\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n from scipy._build_utils import get_sgemv_fix\n from scipy._build_utils import numpy_nodepr_api\n\n config = Configuration('dsolve',parent_package,top_path)\n config.add_data_dir('tests')\n\n lapack_opt = get_info('lapack_opt',notfound_action=2)\n if sys.platform == 'win32':\n superlu_defs = [('NO_TIMER',1)]\n else:\n superlu_defs = []\n superlu_defs.append(('USE_VENDOR_BLAS',1))\n\n superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')\n\n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n # when using MSVC + MKL, lsame is already in MKL\n sources.remove(join(superlu_src, 'lsame.c'))\n\n config.add_library('superlu_src',\n sources=sources,\n macros=superlu_defs,\n include_dirs=[superlu_src],\n )\n\n # Extension\n ext_sources = ['_superlumodule.c',\n '_superlu_utils.c',\n '_superluobject.c']\n ext_sources += get_sgemv_fix(lapack_opt)\n\n config.add_extension('_superlu',\n sources=ext_sources,\n libraries=['superlu_src'],\n depends=(sources + headers),\n extra_info=lapack_opt,\n **numpy_nodepr_api\n )\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": "scipy/sparse/linalg/dsolve/setup.py"}]}
| 1,294 | 170 |
gh_patches_debug_34544
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-6067
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate `create_multi_gpu_supervised_trainer` APIs
**Is your feature request related to a problem? Please describe.**
As we already set up MONAI workflow based on our own SupervisedTrainer, seems no need to keep the ignite trainer APIs which were developed at the beginning of the project.
Would be nice to deprecate them:
https://github.com/Project-MONAI/MONAI/blob/dev/monai/engines/multi_gpu_supervised_trainer.py
</issue>
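For orientation, retiring such factory functions is usually done by wrapping them in a deprecation decorator that warns callers and points at the replacement (`SupervisedTrainer`). The sketch below is a simplified, self-contained stand-in built on `warnings`; it is not MONAI's actual helper, and the accepted patch further below instead uses `monai.utils.deprecated` with `since`/`removed` versions and a message suffix.

```python
# Simplified, illustrative deprecation wrapper; not MONAI's actual helper.
import warnings
from functools import wraps


def deprecated(since: str, removed: str, msg_suffix: str = ""):
    """Return a decorator that emits a FutureWarning whenever the wrapped callable is used."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            warnings.warn(
                f"{fn.__name__} has been deprecated since version {since} "
                f"and will be removed in {removed}. {msg_suffix}",
                FutureWarning,
                stacklevel=2,
            )
            return fn(*args, **kwargs)
        return wrapper
    return decorator


@deprecated(since="1.1", removed="1.3", msg_suffix="Please use SupervisedTrainer instead.")
def create_multigpu_supervised_trainer(*args, **kwargs):
    ...  # original body unchanged; only the warning is added
```

Callers keep working but now see the migration hint, which is the soft landing the issue asks for.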
<code>
[start of monai/engines/multi_gpu_supervised_trainer.py]
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from __future__ import annotations
13
14 from collections.abc import Callable, Sequence
15 from typing import TYPE_CHECKING
16
17 import torch.nn
18 from torch.nn.parallel import DataParallel, DistributedDataParallel
19 from torch.optim.optimizer import Optimizer
20
21 from monai.config import IgniteInfo
22 from monai.engines.utils import get_devices_spec
23 from monai.utils import min_version, optional_import
24
25 create_supervised_trainer, _ = optional_import(
26 "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_trainer"
27 )
28 create_supervised_evaluator, _ = optional_import(
29 "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_evaluator"
30 )
31 _prepare_batch, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "_prepare_batch")
32 if TYPE_CHECKING:
33 from ignite.engine import Engine
34 from ignite.metrics import Metric
35 else:
36 Engine, _ = optional_import(
37 "ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine", as_type="decorator"
38 )
39 Metric, _ = optional_import(
40 "ignite.metrics", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Metric", as_type="decorator"
41 )
42
43 __all__ = ["create_multigpu_supervised_trainer", "create_multigpu_supervised_evaluator"]
44
45
46 def _default_transform(_x: torch.Tensor, _y: torch.Tensor, _y_pred: torch.Tensor, loss: torch.Tensor) -> float:
47 return loss.item()
48
49
50 def _default_eval_transform(
51 x: torch.Tensor, y: torch.Tensor, y_pred: torch.Tensor
52 ) -> tuple[torch.Tensor, torch.Tensor]:
53 return y_pred, y
54
55
56 def create_multigpu_supervised_trainer(
57 net: torch.nn.Module,
58 optimizer: Optimizer,
59 loss_fn: Callable,
60 devices: Sequence[str | torch.device] | None = None,
61 non_blocking: bool = False,
62 prepare_batch: Callable = _prepare_batch,
63 output_transform: Callable = _default_transform,
64 distributed: bool = False,
65 ) -> Engine:
66 """
67 Derived from `create_supervised_trainer` in Ignite.
68
69 Factory function for creating a trainer for supervised models.
70
71 Args:
72 net: the network to train.
73 optimizer: the optimizer to use.
74 loss_fn: the loss function to use.
75 devices: device(s) type specification (default: None).
76 Applies to both model and batches. None is all devices used, empty list is CPU only.
77 non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
78 with respect to the host. For other cases, this argument has no effect.
79 prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
80 tuple of tensors `(batch_x, batch_y)`.
81 output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
82 to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
83 distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain
84 only 1 GPU or CPU for current distributed rank.
85
86 Returns:
87 Engine: a trainer engine with supervised update function.
88
89 Note:
90 `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss
91 of the processed batch by default.
92 """
93
94 devices_ = get_devices_spec(devices)
95 if distributed:
96 if len(devices_) > 1:
97 raise ValueError(f"for distributed training, `devices` must contain only 1 GPU or CPU, but got {devices_}.")
98 net = DistributedDataParallel(net, device_ids=devices_)
99 elif len(devices_) > 1:
100 net = DataParallel(net)
101
102 return create_supervised_trainer( # type: ignore[no-any-return]
103 net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform
104 )
105
106
107 def create_multigpu_supervised_evaluator(
108 net: torch.nn.Module,
109 metrics: dict[str, Metric] | None = None,
110 devices: Sequence[str | torch.device] | None = None,
111 non_blocking: bool = False,
112 prepare_batch: Callable = _prepare_batch,
113 output_transform: Callable = _default_eval_transform,
114 distributed: bool = False,
115 ) -> Engine:
116 """
117 Derived from `create_supervised_evaluator` in Ignite.
118
119 Factory function for creating an evaluator for supervised models.
120
121 Args:
122 net: the model to train.
123 metrics: a map of metric names to Metrics.
124 devices: device(s) type specification (default: None).
125 Applies to both model and batches. None is all devices used, empty list is CPU only.
126 non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
127 with respect to the host. For other cases, this argument has no effect.
128 prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
129 tuple of tensors `(batch_x, batch_y)`.
130 output_transform: function that receives 'x', 'y', 'y_pred' and returns value
131 to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)`
132 which fits output expected by metrics. If you change it you should use `output_transform` in metrics.
133 distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain
134 only 1 GPU or CPU for current distributed rank.
135
136 Note:
137 `engine.state.output` for this engine is defined by `output_transform` parameter and is
138 a tuple of `(batch_pred, batch_y)` by default.
139
140 Returns:
141 Engine: an evaluator engine with supervised inference function.
142 """
143
144 devices_ = get_devices_spec(devices)
145
146 if distributed:
147 net = DistributedDataParallel(net, device_ids=devices_)
148 if len(devices_) > 1:
149 raise ValueError(
150 f"for distributed evaluation, `devices` must contain only 1 GPU or CPU, but got {devices_}."
151 )
152 elif len(devices_) > 1:
153 net = DataParallel(net)
154
155 return create_supervised_evaluator( # type: ignore[no-any-return]
156 net, metrics, devices_[0], non_blocking, prepare_batch, output_transform
157 )
158
[end of monai/engines/multi_gpu_supervised_trainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/monai/engines/multi_gpu_supervised_trainer.py b/monai/engines/multi_gpu_supervised_trainer.py
--- a/monai/engines/multi_gpu_supervised_trainer.py
+++ b/monai/engines/multi_gpu_supervised_trainer.py
@@ -20,7 +20,7 @@
from monai.config import IgniteInfo
from monai.engines.utils import get_devices_spec
-from monai.utils import min_version, optional_import
+from monai.utils import deprecated, min_version, optional_import
create_supervised_trainer, _ = optional_import(
"ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "create_supervised_trainer"
@@ -53,6 +53,11 @@
return y_pred, y
+@deprecated(
+ since="1.1",
+ removed="1.3",
+ msg_suffix=("Native ignite engine lacks support of many MONAI features, please use `SupervisedTrainer` instead."),
+)
def create_multigpu_supervised_trainer(
net: torch.nn.Module,
optimizer: Optimizer,
@@ -100,10 +105,23 @@
net = DataParallel(net)
return create_supervised_trainer( # type: ignore[no-any-return]
- net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform
+ model=net,
+ optimizer=optimizer,
+ loss_fn=loss_fn,
+ device=devices_[0],
+ non_blocking=non_blocking,
+ prepare_batch=prepare_batch,
+ output_transform=output_transform,
)
+@deprecated(
+ since="1.1",
+ removed="1.3",
+ msg_suffix=(
+ "Native ignite evaluator lacks support of many MONAI features, please use `SupervisedEvaluator` instead."
+ ),
+)
def create_multigpu_supervised_evaluator(
net: torch.nn.Module,
metrics: dict[str, Metric] | None = None,
@@ -153,5 +171,10 @@
net = DataParallel(net)
return create_supervised_evaluator( # type: ignore[no-any-return]
- net, metrics, devices_[0], non_blocking, prepare_batch, output_transform
+ model=net,
+ metrics=metrics,
+ device=devices_[0],
+ non_blocking=non_blocking,
+ prepare_batch=prepare_batch,
+ output_transform=output_transform,
)
|
{"golden_diff": "diff --git a/monai/engines/multi_gpu_supervised_trainer.py b/monai/engines/multi_gpu_supervised_trainer.py\n--- a/monai/engines/multi_gpu_supervised_trainer.py\n+++ b/monai/engines/multi_gpu_supervised_trainer.py\n@@ -20,7 +20,7 @@\n \n from monai.config import IgniteInfo\n from monai.engines.utils import get_devices_spec\n-from monai.utils import min_version, optional_import\n+from monai.utils import deprecated, min_version, optional_import\n \n create_supervised_trainer, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_trainer\"\n@@ -53,6 +53,11 @@\n return y_pred, y\n \n \n+@deprecated(\n+ since=\"1.1\",\n+ removed=\"1.3\",\n+ msg_suffix=(\"Native ignite engine lacks support of many MONAI features, please use `SupervisedTrainer` instead.\"),\n+)\n def create_multigpu_supervised_trainer(\n net: torch.nn.Module,\n optimizer: Optimizer,\n@@ -100,10 +105,23 @@\n net = DataParallel(net)\n \n return create_supervised_trainer( # type: ignore[no-any-return]\n- net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform\n+ model=net,\n+ optimizer=optimizer,\n+ loss_fn=loss_fn,\n+ device=devices_[0],\n+ non_blocking=non_blocking,\n+ prepare_batch=prepare_batch,\n+ output_transform=output_transform,\n )\n \n \n+@deprecated(\n+ since=\"1.1\",\n+ removed=\"1.3\",\n+ msg_suffix=(\n+ \"Native ignite evaluator lacks support of many MONAI features, please use `SupervisedEvaluator` instead.\"\n+ ),\n+)\n def create_multigpu_supervised_evaluator(\n net: torch.nn.Module,\n metrics: dict[str, Metric] | None = None,\n@@ -153,5 +171,10 @@\n net = DataParallel(net)\n \n return create_supervised_evaluator( # type: ignore[no-any-return]\n- net, metrics, devices_[0], non_blocking, prepare_batch, output_transform\n+ model=net,\n+ metrics=metrics,\n+ device=devices_[0],\n+ non_blocking=non_blocking,\n+ prepare_batch=prepare_batch,\n+ output_transform=output_transform,\n )\n", "issue": "Deprecate `create_multi_gpu_supervised_trainer` APIs\n**Is your feature request related to a problem? 
Please describe.**\r\nAs we already set up MONAI workflow based on our own SupervisedTrainer, seems no need to keep the ignite trainer APIs which were developed at the beginning of the project.\r\nWould be nice to deprecate them:\r\nhttps://github.com/Project-MONAI/MONAI/blob/dev/monai/engines/multi_gpu_supervised_trainer.py\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom collections.abc import Callable, Sequence\nfrom typing import TYPE_CHECKING\n\nimport torch.nn\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.optim.optimizer import Optimizer\n\nfrom monai.config import IgniteInfo\nfrom monai.engines.utils import get_devices_spec\nfrom monai.utils import min_version, optional_import\n\ncreate_supervised_trainer, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_trainer\"\n)\ncreate_supervised_evaluator, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"create_supervised_evaluator\"\n)\n_prepare_batch, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"_prepare_batch\")\nif TYPE_CHECKING:\n from ignite.engine import Engine\n from ignite.metrics import Metric\nelse:\n Engine, _ = optional_import(\n \"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Engine\", as_type=\"decorator\"\n )\n Metric, _ = optional_import(\n \"ignite.metrics\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Metric\", as_type=\"decorator\"\n )\n\n__all__ = [\"create_multigpu_supervised_trainer\", \"create_multigpu_supervised_evaluator\"]\n\n\ndef _default_transform(_x: torch.Tensor, _y: torch.Tensor, _y_pred: torch.Tensor, loss: torch.Tensor) -> float:\n return loss.item()\n\n\ndef _default_eval_transform(\n x: torch.Tensor, y: torch.Tensor, y_pred: torch.Tensor\n) -> tuple[torch.Tensor, torch.Tensor]:\n return y_pred, y\n\n\ndef create_multigpu_supervised_trainer(\n net: torch.nn.Module,\n optimizer: Optimizer,\n loss_fn: Callable,\n devices: Sequence[str | torch.device] | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = _default_transform,\n distributed: bool = False,\n) -> Engine:\n \"\"\"\n Derived from `create_supervised_trainer` in Ignite.\n\n Factory function for creating a trainer for supervised models.\n\n Args:\n net: the network to train.\n optimizer: the optimizer to use.\n loss_fn: the loss function to use.\n devices: device(s) type specification (default: None).\n Applies to both model and batches. None is all devices used, empty list is CPU only.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. 
For other cases, this argument has no effect.\n prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain\n only 1 GPU or CPU for current distributed rank.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n\n Note:\n `engine.state.output` for this engine is defined by `output_transform` parameter and is the loss\n of the processed batch by default.\n \"\"\"\n\n devices_ = get_devices_spec(devices)\n if distributed:\n if len(devices_) > 1:\n raise ValueError(f\"for distributed training, `devices` must contain only 1 GPU or CPU, but got {devices_}.\")\n net = DistributedDataParallel(net, device_ids=devices_)\n elif len(devices_) > 1:\n net = DataParallel(net)\n\n return create_supervised_trainer( # type: ignore[no-any-return]\n net, optimizer, loss_fn, devices_[0], non_blocking, prepare_batch, output_transform\n )\n\n\ndef create_multigpu_supervised_evaluator(\n net: torch.nn.Module,\n metrics: dict[str, Metric] | None = None,\n devices: Sequence[str | torch.device] | None = None,\n non_blocking: bool = False,\n prepare_batch: Callable = _prepare_batch,\n output_transform: Callable = _default_eval_transform,\n distributed: bool = False,\n) -> Engine:\n \"\"\"\n Derived from `create_supervised_evaluator` in Ignite.\n\n Factory function for creating an evaluator for supervised models.\n\n Args:\n net: the model to train.\n metrics: a map of metric names to Metrics.\n devices: device(s) type specification (default: None).\n Applies to both model and batches. None is all devices used, empty list is CPU only.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform: function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)`\n which fits output expected by metrics. If you change it you should use `output_transform` in metrics.\n distributed: whether convert model to `DistributedDataParallel`, if `True`, `devices` must contain\n only 1 GPU or CPU for current distributed rank.\n\n Note:\n `engine.state.output` for this engine is defined by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n\n devices_ = get_devices_spec(devices)\n\n if distributed:\n net = DistributedDataParallel(net, device_ids=devices_)\n if len(devices_) > 1:\n raise ValueError(\n f\"for distributed evaluation, `devices` must contain only 1 GPU or CPU, but got {devices_}.\"\n )\n elif len(devices_) > 1:\n net = DataParallel(net)\n\n return create_supervised_evaluator( # type: ignore[no-any-return]\n net, metrics, devices_[0], non_blocking, prepare_batch, output_transform\n )\n", "path": "monai/engines/multi_gpu_supervised_trainer.py"}]}
| 2,545 | 558 |
gh_patches_debug_16851
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-999
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Driver resolution code can not handle dataset format being None
When attempting to load a dataset without a format specified, `None` is used as a string.
Example reported on Slack:
```
2020-07-23 17:05:21,576 48721 datacube-ingest ERROR Failed to create storage unit file (Exception: 'NoneType' object has no attribute 'lower')
Traceback (most recent call last):
File "/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py", line 340, in process_tasks
executor.result(future)
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 63, in result
return func(*args, **kwargs)
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 33, in reraise
raise t.with_traceback(e, traceback)
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 36, in get_ready
result = SerialExecutor.result(futures[0])
File "/home/sopon/projects/cornucopia/odc-core/datacube/executor.py", line 63, in result
return func(*args, **kwargs)
File "/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py", line 257, in ingest_work
fuse_func=fuse_func)
File "/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py", line 589, in load_data
progress_cbk=progress_cbk)
File "/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py", line 526, in _xr_load
progress_cbk=_cbk)
File "/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py", line 703, in _fuse_measurement
src = new_datasource(BandInfo(ds, measurement.name))
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 101, in new_datasource
source_type = choose_datasource(band)
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 80, in choose_datasource
return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 37, in __call__
driver = self._find_driver(uri_scheme, fmt)
File "/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py", line 25, in _find_driver
key = (uri_scheme.lower(), fmt.lower())
AttributeError: 'NoneType' object has no attribute 'lower'
2020-07-23 17:05:21,576 48721 datacube-ingest INFO Storage unit file creation status (Created_Count: 14, Failed_Count: 15)
```
I think the proper solution is to fall back to the default driver implementation. Also, at index time a warning should be printed if the dataset format is not specified; see #955.
</issue>
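To make the proposed fallback concrete, here is a minimal, self-contained sketch (not datacube's actual code) of a lookup that tolerates a missing format: `None` is normalised to an empty string, the `(scheme, format)` key then simply misses the driver table, and the default reader is returned instead of raising `AttributeError`. The accepted patch further below takes the equivalent route of normalising `BandInfo.format` and `uri_scheme` to `''`.

```python
# Illustrative only: a driver lookup that degrades gracefully when the
# dataset format (or URI scheme) was never recorded at index time.
from typing import Callable, Dict, Optional, Tuple

DriverTable = Dict[Tuple[str, str], Callable]


def choose_reader(table: DriverTable,
                  uri_scheme: Optional[str],
                  fmt: Optional[str],
                  fallback: Callable) -> Callable:
    key = ((uri_scheme or "").lower(), (fmt or "").lower())  # None becomes ""
    return table.get(key, fallback)


def default_reader(band: str) -> str:
    return f"RasterDatasetDataSource placeholder for {band}"


table: DriverTable = {("s3", "netcdf"): lambda band: f"s3/netcdf reader for {band}"}

# Dataset indexed without a format: no AttributeError, just the fallback.
print(choose_reader(table, "file", None, default_reader)("red"))
```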
<code>
[start of datacube/drivers/readers.py]
1 from typing import List, Optional, Callable
2 from .driver_cache import load_drivers
3 from .datasource import DataSource
4 from ._tools import singleton_setup
5 from datacube.storage._base import BandInfo
6
7 DatasourceFactory = Callable[[BandInfo], DataSource] # pylint: disable=invalid-name
8
9
10 class ReaderDriverCache(object):
11 def __init__(self, group: str):
12 self._drivers = load_drivers(group)
13
14 lookup = {}
15 for driver in self._drivers.values():
16 for uri_scheme in driver.protocols:
17 for fmt in driver.formats:
18 if driver.supports(uri_scheme, fmt):
19 key = (uri_scheme.lower(), fmt.lower())
20 lookup[key] = driver
21
22 self._lookup = lookup
23
24 def _find_driver(self, uri_scheme: str, fmt: str):
25 key = (uri_scheme.lower(), fmt.lower())
26 return self._lookup.get(key)
27
28 def __call__(self, uri_scheme: str, fmt: str,
29 fallback: Optional[DatasourceFactory] = None) -> DatasourceFactory:
30 """Lookup `new_datasource` constructor method from the driver. Returns
31 `fallback` method if no driver is found.
32
33 :param uri_scheme: Protocol part of the Dataset uri
34 :param fmt: Dataset format
35 :return: Returns function `(DataSet, band_name:str) => DataSource`
36 """
37 driver = self._find_driver(uri_scheme, fmt)
38 if driver is not None:
39 return driver.new_datasource
40 if fallback is not None:
41 return fallback
42 else:
43 raise KeyError("No driver found and no fallback provided")
44
45 def drivers(self) -> List[str]:
46 """ Returns list of driver names
47 """
48 return list(self._drivers.keys())
49
50
51 def rdr_cache() -> ReaderDriverCache:
52 """ Singleton for ReaderDriverCache
53 """
54 return singleton_setup(rdr_cache, '_instance',
55 ReaderDriverCache,
56 'datacube.plugins.io.read')
57
58
59 def reader_drivers() -> List[str]:
60 """ Returns list driver names
61 """
62 return rdr_cache().drivers()
63
64
65 def choose_datasource(band: 'BandInfo') -> DatasourceFactory:
66 """Returns appropriate `DataSource` class (or a constructor method) for loading
67 given `dataset`.
68
69 An appropriate `DataSource` implementation is chosen based on:
70
71 - Dataset URI (protocol part)
72 - Dataset format
73 - Current system settings
74 - Available IO plugins
75
76 NOTE: we assume that all bands can be loaded with the same implementation.
77
78 """
79 from datacube.storage._rio import RasterDatasetDataSource
80 return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)
81
82
83 def new_datasource(band: BandInfo) -> Optional[DataSource]:
84 """Returns a newly constructed data source to read dataset band data.
85
86 An appropriate `DataSource` implementation is chosen based on:
87
88 - Dataset URI (protocol part)
89 - Dataset format
90 - Current system settings
91 - Available IO plugins
92
93 This function will return the default :class:`RasterDatasetDataSource` if no more specific
94 ``DataSource`` can be found.
95
96 :param dataset: The dataset to read.
97 :param str band_name: the name of the band to read.
98
99 """
100
101 source_type = choose_datasource(band)
102
103 if source_type is None:
104 return None
105
106 return source_type(band)
107
[end of datacube/drivers/readers.py]
[start of datacube/storage/_base.py]
1 from typing import Optional, Dict, Any, Tuple
2 from urllib.parse import urlparse
3
4 from datacube.model import Dataset
5 from datacube.utils.uris import uri_resolve, pick_uri
6
7
8 def _get_band_and_layer(b: Dict[str, Any]) -> Tuple[Optional[int], Optional[str]]:
9 """ Encode legacy logic for extracting band/layer:
10
11 on input:
12 band -- Int | Nothing
13 layer -- Str | Int | Nothing
14
15 Valid combinations are:
16 band layer Output
17 ---------------------------
18 - - ( - , - )
19 - int (int, - )
20 int - (int, - )
21 int str (int, str)
22 - str ( - , str)
23
24 """
25 band = b.get('band')
26 layer = b.get('layer')
27
28 if band is None:
29 if isinstance(layer, int):
30 return (layer, None)
31 if layer is None or isinstance(layer, str):
32 return (None, layer)
33
34 raise ValueError('Expect `layer` to be one of None,int,str but it is {}'.format(type(layer)))
35 else:
36 if not isinstance(band, int):
37 raise ValueError('Expect `band` to be an integer (it is {})'.format(type(band)))
38 if layer is not None and not isinstance(layer, str):
39 raise ValueError('Expect `layer` to be one of None,str but it is {}'.format(type(layer)))
40
41 return (band, layer)
42
43
44 def _extract_driver_data(ds: Dataset) -> Optional[Any]:
45 return ds.metadata_doc.get('driver_data', None)
46
47
48 def measurement_paths(ds: Dataset) -> Dict[str, str]:
49 """
50 Returns a dictionary mapping from band name to url pointing to band storage
51 resource.
52
53 :return: Band Name => URL
54 """
55 if ds.uris is None:
56 raise ValueError('No locations on this dataset')
57
58 base = pick_uri(ds.uris)
59 return dict((k, uri_resolve(base, m.get('path')))
60 for k, m in ds.measurements.items())
61
62
63 class BandInfo:
64 __slots__ = ('name',
65 'uri',
66 'band',
67 'layer',
68 'dtype',
69 'nodata',
70 'units',
71 'crs',
72 'transform',
73 'center_time',
74 'format',
75 'driver_data')
76
77 def __init__(self,
78 ds: Dataset,
79 band: str,
80 uri_scheme: Optional[str] = None):
81 try:
82 mp, = ds.type.lookup_measurements([band]).values()
83 except KeyError:
84 raise ValueError('No such band: {}'.format(band))
85
86 mm = ds.measurements.get(mp.canonical_name)
87
88 if mm is None:
89 raise ValueError('No such band: {}'.format(band))
90
91 if ds.uris is None:
92 raise ValueError('No uris defined on a dataset')
93
94 base_uri = pick_uri(ds.uris, uri_scheme)
95
96 bint, layer = _get_band_and_layer(mm)
97
98 self.name = band
99 self.uri = uri_resolve(base_uri, mm.get('path'))
100 self.band = bint
101 self.layer = layer
102 self.dtype = mp.dtype
103 self.nodata = mp.nodata
104 self.units = mp.units
105 self.crs = ds.crs
106 self.transform = ds.transform
107 self.format = ds.format
108 self.driver_data = _extract_driver_data(ds)
109
110 @property
111 def uri_scheme(self) -> str:
112 return urlparse(self.uri).scheme
113
[end of datacube/storage/_base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/datacube/drivers/readers.py b/datacube/drivers/readers.py
--- a/datacube/drivers/readers.py
+++ b/datacube/drivers/readers.py
@@ -62,7 +62,7 @@
return rdr_cache().drivers()
-def choose_datasource(band: 'BandInfo') -> DatasourceFactory:
+def choose_datasource(band: BandInfo) -> DatasourceFactory:
"""Returns appropriate `DataSource` class (or a constructor method) for loading
given `dataset`.
diff --git a/datacube/storage/_base.py b/datacube/storage/_base.py
--- a/datacube/storage/_base.py
+++ b/datacube/storage/_base.py
@@ -104,9 +104,9 @@
self.units = mp.units
self.crs = ds.crs
self.transform = ds.transform
- self.format = ds.format
+ self.format = ds.format or ''
self.driver_data = _extract_driver_data(ds)
@property
def uri_scheme(self) -> str:
- return urlparse(self.uri).scheme
+ return urlparse(self.uri).scheme or ''
|
{"golden_diff": "diff --git a/datacube/drivers/readers.py b/datacube/drivers/readers.py\n--- a/datacube/drivers/readers.py\n+++ b/datacube/drivers/readers.py\n@@ -62,7 +62,7 @@\n return rdr_cache().drivers()\n \n \n-def choose_datasource(band: 'BandInfo') -> DatasourceFactory:\n+def choose_datasource(band: BandInfo) -> DatasourceFactory:\n \"\"\"Returns appropriate `DataSource` class (or a constructor method) for loading\n given `dataset`.\n \ndiff --git a/datacube/storage/_base.py b/datacube/storage/_base.py\n--- a/datacube/storage/_base.py\n+++ b/datacube/storage/_base.py\n@@ -104,9 +104,9 @@\n self.units = mp.units\n self.crs = ds.crs\n self.transform = ds.transform\n- self.format = ds.format\n+ self.format = ds.format or ''\n self.driver_data = _extract_driver_data(ds)\n \n @property\n def uri_scheme(self) -> str:\n- return urlparse(self.uri).scheme\n+ return urlparse(self.uri).scheme or ''\n", "issue": "Driver resolution code can not handle dataset format being None\nWhen attempting to load dataset without format specified `None` is used as a string\r\n\r\nExample reported on Slack:\r\n\r\n```\r\n2020-07-23 17:05:21,576 48721 datacube-ingest ERROR Failed to create storage unit file (Exception: 'NoneType' object has no attribute 'lower') \r\nTraceback (most recent call last):\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py\", line 340, in process_tasks\r\n executor.result(future)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 63, in result\r\n return func(*args, **kwargs)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 33, in reraise\r\n raise t.with_traceback(e, traceback)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 36, in get_ready\r\n result = SerialExecutor.result(futures[0])\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/executor.py\", line 63, in result\r\n return func(*args, **kwargs)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/scripts/ingest.py\", line 257, in ingest_work\r\n fuse_func=fuse_func)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py\", line 589, in load_data\r\n progress_cbk=progress_cbk)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py\", line 526, in _xr_load\r\n progress_cbk=_cbk)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/api/core.py\", line 703, in _fuse_measurement\r\n src = new_datasource(BandInfo(ds, measurement.name))\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 101, in new_datasource\r\n source_type = choose_datasource(band)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 80, in choose_datasource\r\n return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 37, in __call__\r\n driver = self._find_driver(uri_scheme, fmt)\r\n File \"/home/sopon/projects/cornucopia/odc-core/datacube/drivers/readers.py\", line 25, in _find_driver\r\n key = (uri_scheme.lower(), fmt.lower())\r\nAttributeError: 'NoneType' object has no attribute 'lower'\r\n2020-07-23 17:05:21,576 48721 datacube-ingest INFO Storage unit file creation status (Created_Count: 14, Failed_Count: 15)\r\n```\r\n\r\nI think proper solution to that is to fallback to default driver implementation. 
Also at index time warning should be printed if dataset format is not specified, see #955.\n", "before_files": [{"content": "from typing import List, Optional, Callable\nfrom .driver_cache import load_drivers\nfrom .datasource import DataSource\nfrom ._tools import singleton_setup\nfrom datacube.storage._base import BandInfo\n\nDatasourceFactory = Callable[[BandInfo], DataSource] # pylint: disable=invalid-name\n\n\nclass ReaderDriverCache(object):\n def __init__(self, group: str):\n self._drivers = load_drivers(group)\n\n lookup = {}\n for driver in self._drivers.values():\n for uri_scheme in driver.protocols:\n for fmt in driver.formats:\n if driver.supports(uri_scheme, fmt):\n key = (uri_scheme.lower(), fmt.lower())\n lookup[key] = driver\n\n self._lookup = lookup\n\n def _find_driver(self, uri_scheme: str, fmt: str):\n key = (uri_scheme.lower(), fmt.lower())\n return self._lookup.get(key)\n\n def __call__(self, uri_scheme: str, fmt: str,\n fallback: Optional[DatasourceFactory] = None) -> DatasourceFactory:\n \"\"\"Lookup `new_datasource` constructor method from the driver. Returns\n `fallback` method if no driver is found.\n\n :param uri_scheme: Protocol part of the Dataset uri\n :param fmt: Dataset format\n :return: Returns function `(DataSet, band_name:str) => DataSource`\n \"\"\"\n driver = self._find_driver(uri_scheme, fmt)\n if driver is not None:\n return driver.new_datasource\n if fallback is not None:\n return fallback\n else:\n raise KeyError(\"No driver found and no fallback provided\")\n\n def drivers(self) -> List[str]:\n \"\"\" Returns list of driver names\n \"\"\"\n return list(self._drivers.keys())\n\n\ndef rdr_cache() -> ReaderDriverCache:\n \"\"\" Singleton for ReaderDriverCache\n \"\"\"\n return singleton_setup(rdr_cache, '_instance',\n ReaderDriverCache,\n 'datacube.plugins.io.read')\n\n\ndef reader_drivers() -> List[str]:\n \"\"\" Returns list driver names\n \"\"\"\n return rdr_cache().drivers()\n\n\ndef choose_datasource(band: 'BandInfo') -> DatasourceFactory:\n \"\"\"Returns appropriate `DataSource` class (or a constructor method) for loading\n given `dataset`.\n\n An appropriate `DataSource` implementation is chosen based on:\n\n - Dataset URI (protocol part)\n - Dataset format\n - Current system settings\n - Available IO plugins\n\n NOTE: we assume that all bands can be loaded with the same implementation.\n\n \"\"\"\n from datacube.storage._rio import RasterDatasetDataSource\n return rdr_cache()(band.uri_scheme, band.format, fallback=RasterDatasetDataSource)\n\n\ndef new_datasource(band: BandInfo) -> Optional[DataSource]:\n \"\"\"Returns a newly constructed data source to read dataset band data.\n\n An appropriate `DataSource` implementation is chosen based on:\n\n - Dataset URI (protocol part)\n - Dataset format\n - Current system settings\n - Available IO plugins\n\n This function will return the default :class:`RasterDatasetDataSource` if no more specific\n ``DataSource`` can be found.\n\n :param dataset: The dataset to read.\n :param str band_name: the name of the band to read.\n\n \"\"\"\n\n source_type = choose_datasource(band)\n\n if source_type is None:\n return None\n\n return source_type(band)\n", "path": "datacube/drivers/readers.py"}, {"content": "from typing import Optional, Dict, Any, Tuple\nfrom urllib.parse import urlparse\n\nfrom datacube.model import Dataset\nfrom datacube.utils.uris import uri_resolve, pick_uri\n\n\ndef _get_band_and_layer(b: Dict[str, Any]) -> Tuple[Optional[int], Optional[str]]:\n \"\"\" Encode legacy logic for extracting 
band/layer:\n\n on input:\n band -- Int | Nothing\n layer -- Str | Int | Nothing\n\n Valid combinations are:\n band layer Output\n ---------------------------\n - - ( - , - )\n - int (int, - )\n int - (int, - )\n int str (int, str)\n - str ( - , str)\n\n \"\"\"\n band = b.get('band')\n layer = b.get('layer')\n\n if band is None:\n if isinstance(layer, int):\n return (layer, None)\n if layer is None or isinstance(layer, str):\n return (None, layer)\n\n raise ValueError('Expect `layer` to be one of None,int,str but it is {}'.format(type(layer)))\n else:\n if not isinstance(band, int):\n raise ValueError('Expect `band` to be an integer (it is {})'.format(type(band)))\n if layer is not None and not isinstance(layer, str):\n raise ValueError('Expect `layer` to be one of None,str but it is {}'.format(type(layer)))\n\n return (band, layer)\n\n\ndef _extract_driver_data(ds: Dataset) -> Optional[Any]:\n return ds.metadata_doc.get('driver_data', None)\n\n\ndef measurement_paths(ds: Dataset) -> Dict[str, str]:\n \"\"\"\n Returns a dictionary mapping from band name to url pointing to band storage\n resource.\n\n :return: Band Name => URL\n \"\"\"\n if ds.uris is None:\n raise ValueError('No locations on this dataset')\n\n base = pick_uri(ds.uris)\n return dict((k, uri_resolve(base, m.get('path')))\n for k, m in ds.measurements.items())\n\n\nclass BandInfo:\n __slots__ = ('name',\n 'uri',\n 'band',\n 'layer',\n 'dtype',\n 'nodata',\n 'units',\n 'crs',\n 'transform',\n 'center_time',\n 'format',\n 'driver_data')\n\n def __init__(self,\n ds: Dataset,\n band: str,\n uri_scheme: Optional[str] = None):\n try:\n mp, = ds.type.lookup_measurements([band]).values()\n except KeyError:\n raise ValueError('No such band: {}'.format(band))\n\n mm = ds.measurements.get(mp.canonical_name)\n\n if mm is None:\n raise ValueError('No such band: {}'.format(band))\n\n if ds.uris is None:\n raise ValueError('No uris defined on a dataset')\n\n base_uri = pick_uri(ds.uris, uri_scheme)\n\n bint, layer = _get_band_and_layer(mm)\n\n self.name = band\n self.uri = uri_resolve(base_uri, mm.get('path'))\n self.band = bint\n self.layer = layer\n self.dtype = mp.dtype\n self.nodata = mp.nodata\n self.units = mp.units\n self.crs = ds.crs\n self.transform = ds.transform\n self.format = ds.format\n self.driver_data = _extract_driver_data(ds)\n\n @property\n def uri_scheme(self) -> str:\n return urlparse(self.uri).scheme\n", "path": "datacube/storage/_base.py"}]}
| 3,266 | 247 |
gh_patches_debug_29852
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1506
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proxied evaluations shouldn't show as delegated
Evaluations where a proxy user is responsible shouldn't be shown as "delegated courses" for the delegates of this proxy user. The delegates should see these courses in the list of their own courses, like those they are directly responsible for, and the courses shouldn't be hidden when the option to hide delegated courses is active.
</issue>
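In outline, the fix amounts to splitting the represented users into proxy and non-proxy groups: courses of represented proxy users are treated as the delegate's own, while only non-proxy delegators feed the "delegated" list. The accepted patch further below expresses this with Django querysets (`represented_users.filter(is_proxy_user=True)` and `.exclude(is_proxy_user=True)`); the plain-Python sketch here only illustrates the partition with stand-in objects.

```python
# Plain-Python illustration of the partition; EvaP itself does this with
# Django querysets on UserProfile.represented_users.
from dataclasses import dataclass, field
from typing import List, Tuple


@dataclass
class StubUser:
    name: str
    is_proxy_user: bool = False
    represented_users: List["StubUser"] = field(default_factory=list)


def split_represented(user: StubUser) -> Tuple[List[StubUser], List[StubUser]]:
    proxies = [u for u in user.represented_users if u.is_proxy_user]
    delegators = [u for u in user.represented_users if not u.is_proxy_user]
    return proxies, delegators


delegate = StubUser("delegate", represented_users=[
    StubUser("exam office proxy", is_proxy_user=True),  # shown as own courses
    StubUser("professor"),                              # still shown as delegated
])
own_like, delegated = split_represented(delegate)
print([u.name for u in own_like], [u.name for u in delegated])
```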
<code>
[start of evap/contributor/views.py]
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied, SuspiciousOperation
3 from django.db import IntegrityError, transaction
4 from django.db.models import Max, Q
5 from django.forms.models import inlineformset_factory
6 from django.shortcuts import get_object_or_404, redirect, render
7 from django.utils.translation import gettext as _
8 from django.views.decorators.http import require_POST
9
10 from evap.contributor.forms import EvaluationForm, DelegatesForm, EditorContributionForm, DelegateSelectionForm
11 from evap.evaluation.auth import responsible_or_contributor_or_delegate_required, editor_or_delegate_required, editor_required
12 from evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, Semester, UserProfile, EmailTemplate
13 from evap.evaluation.tools import get_parameter_from_url_or_session, sort_formset, FileResponse
14 from evap.results.exporters import ResultsExporter
15 from evap.results.tools import (calculate_average_distribution, distribution_to_grade,
16 get_evaluations_with_course_result_attributes, get_single_result_rating_result,
17 normalized_distribution)
18 from evap.staff.forms import ContributionFormSet
19 from evap.student.views import get_valid_form_groups_or_render_vote_page
20
21
22 @responsible_or_contributor_or_delegate_required
23 def index(request):
24 user = request.user
25 show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)
26
27 contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
28 own_courses = Course.objects.filter(
29 Q(evaluations__state__in=contributor_visible_states) & (
30 Q(responsibles=user) |
31 Q(evaluations__contributions__contributor=user)
32 )
33 )
34 own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
35 for evaluation in own_evaluations:
36 evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()
37
38 displayed_evaluations = set(own_evaluations)
39 if show_delegated:
40 represented_users = user.represented_users.all()
41 delegated_courses = Course.objects.filter(
42 Q(evaluations__state__in=contributor_visible_states) & (
43 Q(responsibles__in=represented_users) |
44 Q(
45 evaluations__contributions__role=Contribution.Role.EDITOR,
46 evaluations__contributions__contributor__in=represented_users,
47 )
48 )
49 )
50 delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))
51 for evaluation in delegated_evaluations:
52 evaluation.delegated_evaluation = True
53 displayed_evaluations |= delegated_evaluations - displayed_evaluations
54 displayed_evaluations = list(displayed_evaluations)
55 displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name)) # evaluations must be sorted for regrouping them in the template
56
57 for evaluation in displayed_evaluations:
58 if evaluation.state == "published":
59 if not evaluation.is_single_result:
60 evaluation.distribution = calculate_average_distribution(evaluation)
61 else:
62 evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
63 evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
64 evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
65 displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)
66
67 semesters = Semester.objects.all()
68 semester_list = [dict(
69 semester_name=semester.name,
70 id=semester.id,
71 is_active=semester.is_active,
72 evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]
73 ) for semester in semesters]
74
75 template_data = dict(
76 semester_list=semester_list,
77 show_delegated=show_delegated,
78 delegate_selection_form=DelegateSelectionForm(),
79 )
80 return render(request, "contributor_index.html", template_data)
81
82
83 @editor_required
84 def settings_edit(request):
85 user = request.user
86 form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)
87
88 if form.is_valid():
89 form.save()
90
91 messages.success(request, _("Successfully updated your settings."))
92 return redirect('contributor:settings_edit')
93
94 return render(request, "contributor_settings.html", dict(
95 form=form,
96 delegate_of=user.represented_users.all(),
97 cc_users=user.cc_users.all(),
98 ccing_users=user.ccing_users.all(),
99 ))
100
101
102 @editor_or_delegate_required
103 def evaluation_view(request, evaluation_id):
104 user = request.user
105 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
106
107 # check rights
108 if not evaluation.is_user_editor_or_delegate(user) or evaluation.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:
109 raise PermissionDenied
110
111 InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)
112
113 form = EvaluationForm(request.POST or None, instance=evaluation)
114 formset = InlineContributionFormset(request.POST or None, instance=evaluation)
115
116 # make everything read-only
117 for cform in formset.forms + [form]:
118 for field in cform.fields.values():
119 field.disabled = True
120
121 template_data = dict(form=form, formset=formset, evaluation=evaluation, editable=False)
122 return render(request, "contributor_evaluation_form.html", template_data)
123
124
125 def render_preview(request, formset, evaluation_form, evaluation):
126 # open transaction to not let any other requests see anything of what we're doing here
127 try:
128 with transaction.atomic():
129 evaluation = evaluation_form.save()
130 evaluation.set_last_modified(request.user)
131 evaluation.save()
132 formset.save()
133 request.POST = None # this prevents errors rendered in the vote form
134
135 preview_response = get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True, for_rendering_in_modal=True)[1].content.decode()
136 raise IntegrityError # rollback transaction to discard the database writes
137 except IntegrityError:
138 pass
139
140 return preview_response
141
142
143 @editor_or_delegate_required
144 def evaluation_edit(request, evaluation_id):
145 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
146
147 # check rights
148 if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == 'prepared'):
149 raise PermissionDenied
150
151 post_operation = request.POST.get('operation') if request.POST else None
152 preview = post_operation == 'preview'
153
154 InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)
155 evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)
156 formset = InlineContributionFormset(request.POST or None, instance=evaluation, form_kwargs={'evaluation': evaluation})
157
158 forms_are_valid = evaluation_form.is_valid() and formset.is_valid()
159 if forms_are_valid and not preview:
160 if post_operation not in ('save', 'approve'):
161 raise SuspiciousOperation("Invalid POST operation")
162
163 form_has_changed = evaluation_form.has_changed() or formset.has_changed()
164
165 if form_has_changed:
166 evaluation.set_last_modified(request.user)
167 evaluation_form.save()
168 formset.save()
169
170 if post_operation == 'approve':
171 evaluation.editor_approve()
172 evaluation.save()
173 if form_has_changed:
174 messages.success(request, _("Successfully updated and approved evaluation."))
175 else:
176 messages.success(request, _("Successfully approved evaluation."))
177 else:
178 messages.success(request, _("Successfully updated evaluation."))
179
180 return redirect('contributor:index')
181
182 preview_html = None
183 if preview and forms_are_valid:
184 preview_html = render_preview(request, formset, evaluation_form, evaluation)
185
186 if not forms_are_valid and (evaluation_form.errors or formset.errors):
187 if preview:
188 messages.error(request, _("The preview could not be rendered. Please resolve the errors shown below."))
189 else:
190 messages.error(request, _("The form was not saved. Please resolve the errors shown below."))
191
192 sort_formset(request, formset)
193 template_data = dict(form=evaluation_form, formset=formset, evaluation=evaluation, editable=True, preview_html=preview_html)
194 return render(request, "contributor_evaluation_form.html", template_data)
195
196
197 @responsible_or_contributor_or_delegate_required
198 def evaluation_preview(request, evaluation_id):
199 user = request.user
200 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
201
202 # check rights
203 if not (evaluation.is_user_responsible_or_contributor_or_delegate(user) and evaluation.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):
204 raise PermissionDenied
205
206 return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]
207
208
209 @require_POST
210 @editor_or_delegate_required
211 def evaluation_direct_delegation(request, evaluation_id):
212 delegate_user_id = request.POST.get("delegate_to")
213
214 evaluation = get_object_or_404(Evaluation, id=evaluation_id)
215 delegate_user = get_object_or_404(UserProfile, id=delegate_user_id)
216
217 contribution, created = Contribution.objects.update_or_create(
218 evaluation=evaluation,
219 contributor=delegate_user,
220 defaults={'role': Contribution.Role.EDITOR},
221 )
222 if created:
223 contribution.order = evaluation.contributions.all().aggregate(Max('order'))['order__max'] + 1
224 contribution.save()
225
226 template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)
227 subject_params = {"evaluation": evaluation, "user": request.user, "delegate_user": delegate_user}
228 body_params = subject_params
229
230 # we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have
231 # an email address. In this special case, we don't want that warning. Instead, we want a mail to the admins.
232 template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])
233
234 messages.add_message(
235 request,
236 messages.SUCCESS,
237 _('{} was added as a contributor for evaluation "{}" and was sent an email with further information.').format(str(delegate_user), str(evaluation))
238 )
239
240 return redirect('contributor:index')
241
242
243 def export_contributor_results(contributor):
244 filename = "Evaluation_{}.xls".format(contributor.full_name)
245 response = FileResponse(filename, content_type="application/vnd.ms-excel")
246 ResultsExporter().export(
247 response,
248 Semester.objects.all(),
249 [(Degree.objects.all(), CourseType.objects.all())],
250 include_not_enough_voters=True,
251 include_unpublished=False,
252 contributor=contributor
253 )
254 return response
255
256
257 @responsible_or_contributor_or_delegate_required
258 def export(request):
259 return export_contributor_results(request.user)
260
[end of evap/contributor/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/contributor/views.py b/evap/contributor/views.py
--- a/evap/contributor/views.py
+++ b/evap/contributor/views.py
@@ -24,11 +24,14 @@
user = request.user
show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)
+ represented_proxy_users = user.represented_users.filter(is_proxy_user=True)
contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
own_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states) & (
Q(responsibles=user) |
- Q(evaluations__contributions__contributor=user)
+ Q(evaluations__contributions__contributor=user) |
+ Q(evaluations__contributions__contributor__in=represented_proxy_users) |
+ Q(responsibles__in=represented_proxy_users)
)
)
own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
@@ -37,7 +40,7 @@
displayed_evaluations = set(own_evaluations)
if show_delegated:
- represented_users = user.represented_users.all()
+ represented_users = user.represented_users.exclude(is_proxy_user=True)
delegated_courses = Course.objects.filter(
Q(evaluations__state__in=contributor_visible_states) & (
Q(responsibles__in=represented_users) |
|
{"golden_diff": "diff --git a/evap/contributor/views.py b/evap/contributor/views.py\n--- a/evap/contributor/views.py\n+++ b/evap/contributor/views.py\n@@ -24,11 +24,14 @@\n user = request.user\n show_delegated = get_parameter_from_url_or_session(request, \"show_delegated\", True)\n \n+ represented_proxy_users = user.represented_users.filter(is_proxy_user=True)\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles=user) |\n- Q(evaluations__contributions__contributor=user)\n+ Q(evaluations__contributions__contributor=user) |\n+ Q(evaluations__contributions__contributor__in=represented_proxy_users) |\n+ Q(responsibles__in=represented_proxy_users)\n )\n )\n own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]\n@@ -37,7 +40,7 @@\n \n displayed_evaluations = set(own_evaluations)\n if show_delegated:\n- represented_users = user.represented_users.all()\n+ represented_users = user.represented_users.exclude(is_proxy_user=True)\n delegated_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles__in=represented_users) |\n", "issue": "Proxied evaluations shouldn't show as delegated\nEvaluations where a proxy user is responsible shouldn't be shown as \"delegated courses\" for the delegates of this proxy user. The delegates should see this course in the list of their own courses like those they are directly responsible for and they shouldn't be hidden when the option to hide delegated courses is active.\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied, SuspiciousOperation\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Max, Q\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom evap.contributor.forms import EvaluationForm, DelegatesForm, EditorContributionForm, DelegateSelectionForm\nfrom evap.evaluation.auth import responsible_or_contributor_or_delegate_required, editor_or_delegate_required, editor_required\nfrom evap.evaluation.models import Contribution, Course, CourseType, Degree, Evaluation, Semester, UserProfile, EmailTemplate\nfrom evap.evaluation.tools import get_parameter_from_url_or_session, sort_formset, FileResponse\nfrom evap.results.exporters import ResultsExporter\nfrom evap.results.tools import (calculate_average_distribution, distribution_to_grade,\n get_evaluations_with_course_result_attributes, get_single_result_rating_result,\n normalized_distribution)\nfrom evap.staff.forms import ContributionFormSet\nfrom evap.student.views import get_valid_form_groups_or_render_vote_page\n\n\n@responsible_or_contributor_or_delegate_required\ndef index(request):\n user = request.user\n show_delegated = get_parameter_from_url_or_session(request, \"show_delegated\", True)\n\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles=user) |\n Q(evaluations__contributions__contributor=user)\n )\n )\n own_evaluations = 
[evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]\n for evaluation in own_evaluations:\n evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()\n\n displayed_evaluations = set(own_evaluations)\n if show_delegated:\n represented_users = user.represented_users.all()\n delegated_courses = Course.objects.filter(\n Q(evaluations__state__in=contributor_visible_states) & (\n Q(responsibles__in=represented_users) |\n Q(\n evaluations__contributions__role=Contribution.Role.EDITOR,\n evaluations__contributions__contributor__in=represented_users,\n )\n )\n )\n delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))\n for evaluation in delegated_evaluations:\n evaluation.delegated_evaluation = True\n displayed_evaluations |= delegated_evaluations - displayed_evaluations\n displayed_evaluations = list(displayed_evaluations)\n displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name)) # evaluations must be sorted for regrouping them in the template\n\n for evaluation in displayed_evaluations:\n if evaluation.state == \"published\":\n if not evaluation.is_single_result:\n evaluation.distribution = calculate_average_distribution(evaluation)\n else:\n evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)\n evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)\n evaluation.avg_grade = distribution_to_grade(evaluation.distribution)\n displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)\n\n semesters = Semester.objects.all()\n semester_list = [dict(\n semester_name=semester.name,\n id=semester.id,\n is_active=semester.is_active,\n evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]\n ) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n show_delegated=show_delegated,\n delegate_selection_form=DelegateSelectionForm(),\n )\n return render(request, \"contributor_index.html\", template_data)\n\n\n@editor_required\ndef settings_edit(request):\n user = request.user\n form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Successfully updated your settings.\"))\n return redirect('contributor:settings_edit')\n\n return render(request, \"contributor_settings.html\", dict(\n form=form,\n delegate_of=user.represented_users.all(),\n cc_users=user.cc_users.all(),\n ccing_users=user.ccing_users.all(),\n ))\n\n\n@editor_or_delegate_required\ndef evaluation_view(request, evaluation_id):\n user = request.user\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not evaluation.is_user_editor_or_delegate(user) or evaluation.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:\n raise PermissionDenied\n\n InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n\n form = EvaluationForm(request.POST or None, instance=evaluation)\n formset = InlineContributionFormset(request.POST or None, instance=evaluation)\n\n # make everything read-only\n for cform in formset.forms + [form]:\n for field in cform.fields.values():\n field.disabled = 
True\n\n template_data = dict(form=form, formset=formset, evaluation=evaluation, editable=False)\n return render(request, \"contributor_evaluation_form.html\", template_data)\n\n\ndef render_preview(request, formset, evaluation_form, evaluation):\n # open transaction to not let any other requests see anything of what we're doing here\n try:\n with transaction.atomic():\n evaluation = evaluation_form.save()\n evaluation.set_last_modified(request.user)\n evaluation.save()\n formset.save()\n request.POST = None # this prevents errors rendered in the vote form\n\n preview_response = get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True, for_rendering_in_modal=True)[1].content.decode()\n raise IntegrityError # rollback transaction to discard the database writes\n except IntegrityError:\n pass\n\n return preview_response\n\n\n@editor_or_delegate_required\ndef evaluation_edit(request, evaluation_id):\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not (evaluation.is_user_editor_or_delegate(request.user) and evaluation.state == 'prepared'):\n raise PermissionDenied\n\n post_operation = request.POST.get('operation') if request.POST else None\n preview = post_operation == 'preview'\n\n InlineContributionFormset = inlineformset_factory(Evaluation, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)\n evaluation_form = EvaluationForm(request.POST or None, instance=evaluation)\n formset = InlineContributionFormset(request.POST or None, instance=evaluation, form_kwargs={'evaluation': evaluation})\n\n forms_are_valid = evaluation_form.is_valid() and formset.is_valid()\n if forms_are_valid and not preview:\n if post_operation not in ('save', 'approve'):\n raise SuspiciousOperation(\"Invalid POST operation\")\n\n form_has_changed = evaluation_form.has_changed() or formset.has_changed()\n\n if form_has_changed:\n evaluation.set_last_modified(request.user)\n evaluation_form.save()\n formset.save()\n\n if post_operation == 'approve':\n evaluation.editor_approve()\n evaluation.save()\n if form_has_changed:\n messages.success(request, _(\"Successfully updated and approved evaluation.\"))\n else:\n messages.success(request, _(\"Successfully approved evaluation.\"))\n else:\n messages.success(request, _(\"Successfully updated evaluation.\"))\n\n return redirect('contributor:index')\n\n preview_html = None\n if preview and forms_are_valid:\n preview_html = render_preview(request, formset, evaluation_form, evaluation)\n\n if not forms_are_valid and (evaluation_form.errors or formset.errors):\n if preview:\n messages.error(request, _(\"The preview could not be rendered. Please resolve the errors shown below.\"))\n else:\n messages.error(request, _(\"The form was not saved. 
Please resolve the errors shown below.\"))\n\n sort_formset(request, formset)\n template_data = dict(form=evaluation_form, formset=formset, evaluation=evaluation, editable=True, preview_html=preview_html)\n return render(request, \"contributor_evaluation_form.html\", template_data)\n\n\n@responsible_or_contributor_or_delegate_required\ndef evaluation_preview(request, evaluation_id):\n user = request.user\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n\n # check rights\n if not (evaluation.is_user_responsible_or_contributor_or_delegate(user) and evaluation.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n return get_valid_form_groups_or_render_vote_page(request, evaluation, preview=True)[1]\n\n\n@require_POST\n@editor_or_delegate_required\ndef evaluation_direct_delegation(request, evaluation_id):\n delegate_user_id = request.POST.get(\"delegate_to\")\n\n evaluation = get_object_or_404(Evaluation, id=evaluation_id)\n delegate_user = get_object_or_404(UserProfile, id=delegate_user_id)\n\n contribution, created = Contribution.objects.update_or_create(\n evaluation=evaluation,\n contributor=delegate_user,\n defaults={'role': Contribution.Role.EDITOR},\n )\n if created:\n contribution.order = evaluation.contributions.all().aggregate(Max('order'))['order__max'] + 1\n contribution.save()\n\n template = EmailTemplate.objects.get(name=EmailTemplate.DIRECT_DELEGATION)\n subject_params = {\"evaluation\": evaluation, \"user\": request.user, \"delegate_user\": delegate_user}\n body_params = subject_params\n\n # we don't provide the request here since send_to_user only uses it to display a warning message in case the user does not have\n # an email address. In this special case, we don't want that warning. Instead, we want a mail to the admins.\n template.send_to_user(delegate_user, subject_params, body_params, use_cc=True, additional_cc_users=[request.user])\n\n messages.add_message(\n request,\n messages.SUCCESS,\n _('{} was added as a contributor for evaluation \"{}\" and was sent an email with further information.').format(str(delegate_user), str(evaluation))\n )\n\n return redirect('contributor:index')\n\n\ndef export_contributor_results(contributor):\n filename = \"Evaluation_{}.xls\".format(contributor.full_name)\n response = FileResponse(filename, content_type=\"application/vnd.ms-excel\")\n ResultsExporter().export(\n response,\n Semester.objects.all(),\n [(Degree.objects.all(), CourseType.objects.all())],\n include_not_enough_voters=True,\n include_unpublished=False,\n contributor=contributor\n )\n return response\n\n\n@responsible_or_contributor_or_delegate_required\ndef export(request):\n return export_contributor_results(request.user)\n", "path": "evap/contributor/views.py"}]}
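The reference diff in this record changes two querysets in `evap/contributor/views.py`: courses whose responsible or contributor is a *proxy user* represented by the viewer now count as the viewer's own courses, and the delegated list is built only from non-proxy represented users. A sketch of the patched logic, lifted from the diff (the wrapper function `_courses_for_contributor` is added here purely for illustration and is not part of the repository):

```python
from django.db.models import Q


def _courses_for_contributor(user, Course, contributor_visible_states):
    # Proxy users represented by this user make a course count as "own" ...
    represented_proxy_users = user.represented_users.filter(is_proxy_user=True)
    own_courses = Course.objects.filter(
        Q(evaluations__state__in=contributor_visible_states) & (
            Q(responsibles=user)
            | Q(evaluations__contributions__contributor=user)
            | Q(evaluations__contributions__contributor__in=represented_proxy_users)
            | Q(responsibles__in=represented_proxy_users)
        )
    )
    # ... while the "delegated" list now excludes proxy users, so proxied
    # evaluations are no longer shown (or hidden) as delegated courses.
    represented_users = user.represented_users.exclude(is_proxy_user=True)
    return own_courses, represented_users
```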
| 3,659 | 358 |
gh_patches_debug_4853 | rasdani/github-patches | git_diff | google__flax-3886 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve SEO for docs pages
When I Google a Flax module to pull up its API documentation, I get broken or irrelevant pages. Examples:
- [flax dense](https://www.google.com/search?q=flax%20dense) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Dense.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Dense)
- [flax layernorm](https://www.google.com/search?q=flax%20layernorm) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.LayerNorm.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.LayerNorm)
- [flax multi head attention](https://www.google.com/search?q=flax%20attention) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.MultiHeadDotProductAttention.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.MultiHeadAttention)
- [flax conv](https://www.google.com/search?q=flax%20conv) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Conv.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Conv)
Is there something that can be done on the website side to improve this?
</issue>
<code>
[start of docs/conf.py]
1 # Copyright 2024 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Configuration file for the Sphinx documentation builder."""
16
17
18 # This file only contains a selection of the most common options. For a full
19 # list see the documentation:
20 # https://www.sphinx-doc.org/en/master/usage/configuration.html
21
22 # -- Path setup --------------------------------------------------------------
23
24 # If extensions (or modules to document with autodoc) are in another directory,
25 # add these directories to sys.path here. If the directory is relative to the
26 # documentation root, use os.path.abspath to make it absolute, like shown here.
27 #
28 # import os
29 # import sys
30 # sys.path.insert(0, os.path.abspath('.'))
31
32 import os
33 import sys
34
35 sys.path.insert(0, os.path.abspath('..'))
36 # Include local extension.
37 sys.path.append(os.path.abspath('./_ext'))
38
39 # patch sphinx
40 # -- Project information -----------------------------------------------------
41
42 project = 'Flax'
43 copyright = '2023, The Flax authors' # pylint: disable=redefined-builtin
44 author = 'The Flax authors'
45
46
47 # -- General configuration ---------------------------------------------------
48
49 # Add any Sphinx extension module names here, as strings. They can be
50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
51 # ones.
52 extensions = [
53 'sphinx.ext.autodoc',
54 'sphinx.ext.autosummary',
55 'sphinx.ext.autosectionlabel',
56 'sphinx.ext.doctest',
57 'sphinx.ext.intersphinx',
58 'sphinx.ext.mathjax',
59 'sphinx.ext.napoleon',
60 'sphinx.ext.viewcode',
61 'myst_nb',
62 'codediff',
63 'flax_module',
64 'sphinx_design',
65 ]
66
67 # Add any paths that contain templates here, relative to this directory.
68 templates_path = ['_templates']
69
70 # List of patterns, relative to source directory, that match files and
71 # directories to ignore when looking for source files.
72 # This pattern also affects html_static_path and html_extra_path.
73 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
74
75 # The suffix(es) of source filenames.
76 # Note: important to list ipynb before md here: we have both md and ipynb
77 # copies of each notebook, and myst will choose which to convert based on
78 # the order in the source_suffix list. Notebooks which are not executed have
79 # outputs stored in ipynb but not in md, so we must convert the ipynb.
80 source_suffix = ['.rst', '.ipynb', '.md']
81
82 autosummary_generate = True
83
84 master_doc = 'index'
85
86 autodoc_typehints = 'none'
87
88
89 # -- Options for HTML output -------------------------------------------------
90
91 # The theme to use for HTML and HTML Help pages. See the documentation for
92 # a list of builtin themes.
93 #
94 # html_theme = 'pydata_sphinx_theme'
95 html_theme = 'sphinx_book_theme'
96 html_css_files = ['css/flax_theme.css']
97
98 # The name of an image file (relative to this directory) to place at the top
99 # of the sidebar.
100 html_logo = './flax.png'
101 html_favicon = './flax.png'
102
103 # title of the website
104 html_title = ''
105
106 # Add any paths that contain custom static files (such as style sheets) here,
107 # relative to this directory. They are copied after the builtin static files,
108 # so a file named 'default.css' will overwrite the builtin 'default.css'.
109 html_static_path = ['_static']
110
111 html_theme_options = {
112 'repository_url': 'https://github.com/google/flax',
113 'use_repository_button': True, # add a 'link to repository' button
114 'use_issues_button': False, # add an 'Open an Issue' button
115 'path_to_docs': (
116 'docs'
117 ), # used to compute the path to launch notebooks in colab
118 'launch_buttons': {
119 'colab_url': 'https://colab.research.google.com/',
120 },
121 'prev_next_buttons_location': None,
122 'show_navbar_depth': 1,
123 }
124
125 # -- Options for myst ----------------------------------------------
126 # uncomment line below to avoid running notebooks during development
127 nb_execution_mode = 'off'
128 # Notebook cell execution timeout; defaults to 30.
129 nb_execution_timeout = 100
130 # List of patterns, relative to source directory, that match notebook
131 # files that will not be executed.
132 myst_enable_extensions = ['dollarmath']
133 nb_execution_excludepatterns = [
134 'quick_start.ipynb', # <-- times out
135 'transfer_learning.ipynb', # <-- transformers requires flax<=0.7.0
136 'flax/experimental/nnx', # exclude nnx
137 ]
138 # raise exceptions on execution so CI can catch errors
139 nb_execution_allow_errors = False
140 nb_execution_raise_on_error = True
141
142 # -- Extension configuration -------------------------------------------------
143
144 # Tell sphinx-autodoc-typehints to generate stub parameter annotations including
145 # types, even if the parameters aren't explicitly documented.
146 always_document_param_types = True
147
148 # -- doctest configuration -------------------------------------------------
149 doctest_global_setup = """
150 import jax
151 import jax.numpy as jnp
152 from flax.experimental import nnx
153
154 import logging as slog
155 from absl import logging as alog
156
157 # Avoid certain absl logging messages to break doctest
158 filtered_message = [
159 'SaveArgs.aggregate is deprecated',
160 '',
161 ]
162
163 class _CustomLogFilter(slog.Formatter):
164 def format(self, record):
165 message = super(_CustomLogFilter, self).format(record)
166 for m in filtered_message:
167 if m in message:
168 return ''
169 return message
170
171 alog.use_absl_handler()
172 alog.get_absl_handler().setFormatter(_CustomLogFilter())
173 """
174
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -108,6 +108,8 @@
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['_static']
+html_extra_path = ['robots.txt']
+
html_theme_options = {
'repository_url': 'https://github.com/google/flax',
'use_repository_button': True, # add a 'link to repository' button
|
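The one-line fix above uses Sphinx's `html_extra_path`, which copies the listed files verbatim into the root of the built HTML tree, so a `docs/robots.txt` ends up served at `<site>/robots.txt`. A minimal sketch of the resulting configuration (the surrounding lines are as they appear in `docs/conf.py`; the contents of the added `robots.txt` are not part of this record):

```python
# docs/conf.py (after the patch): entries in html_extra_path are copied as-is
# into the HTML output root rather than into _static/.
html_static_path = ['_static']

html_extra_path = ['robots.txt']
```

Presumably the shipped `robots.txt` asks crawlers to skip the duplicated `_autosummary/` pages so that the canonical `layers.html` anchors rank in search results instead.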
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -108,6 +108,8 @@\n # so a file named 'default.css' will overwrite the builtin 'default.css'.\n html_static_path = ['_static']\n \n+html_extra_path = ['robots.txt']\n+\n html_theme_options = {\n 'repository_url': 'https://github.com/google/flax',\n 'use_repository_button': True, # add a 'link to repository' button\n", "issue": "Improve SEO for docs pages\nWhen I Google a Flax module to pull up its API documentation, I get broken or irrelevant pages. Examples:\r\n\r\n- [flax dense](https://www.google.com/search?q=flax%20dense) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Dense.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Dense)\r\n- [flax layernorm](https://www.google.com/search?q=flax%20layernorm) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.LayerNorm.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.LayerNorm)\r\n- [flax multi head attention](https://www.google.com/search?q=flax%20attention) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.MultiHeadDotProductAttention.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.MultiHeadAttention)\r\n- [flax conv](https://www.google.com/search?q=flax%20conv) | [top result](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/_autosummary/flax.linen.Conv.html) | [desired page](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/layers.html#flax.linen.Conv)\r\n\r\nIs there something that can be done on the website side to improve this?\n", "before_files": [{"content": "# Copyright 2024 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\n\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n# Include local extension.\nsys.path.append(os.path.abspath('./_ext'))\n\n# patch sphinx\n# -- Project information -----------------------------------------------------\n\nproject = 'Flax'\ncopyright = '2023, The Flax authors' # pylint: disable=redefined-builtin\nauthor = 'The Flax authors'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'myst_nb',\n 'codediff',\n 'flax_module',\n 'sphinx_design',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The suffix(es) of source filenames.\n# Note: important to list ipynb before md here: we have both md and ipynb\n# copies of each notebook, and myst will choose which to convert based on\n# the order in the source_suffix list. Notebooks which are not executed have\n# outputs stored in ipynb but not in md, so we must convert the ipynb.\nsource_suffix = ['.rst', '.ipynb', '.md']\n\nautosummary_generate = True\n\nmaster_doc = 'index'\n\nautodoc_typehints = 'none'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'pydata_sphinx_theme'\nhtml_theme = 'sphinx_book_theme'\nhtml_css_files = ['css/flax_theme.css']\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = './flax.png'\nhtml_favicon = './flax.png'\n\n# title of the website\nhtml_title = ''\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named 'default.css' will overwrite the builtin 'default.css'.\nhtml_static_path = ['_static']\n\nhtml_theme_options = {\n 'repository_url': 'https://github.com/google/flax',\n 'use_repository_button': True, # add a 'link to repository' button\n 'use_issues_button': False, # add an 'Open an Issue' button\n 'path_to_docs': (\n 'docs'\n ), # used to compute the path to launch notebooks in colab\n 'launch_buttons': {\n 'colab_url': 'https://colab.research.google.com/',\n },\n 'prev_next_buttons_location': None,\n 'show_navbar_depth': 1,\n}\n\n# -- Options for myst ----------------------------------------------\n# uncomment line below to avoid running notebooks during development\nnb_execution_mode = 'off'\n# Notebook cell execution timeout; defaults to 30.\nnb_execution_timeout = 100\n# List of patterns, relative to source directory, that match notebook\n# files that will not be executed.\nmyst_enable_extensions = ['dollarmath']\nnb_execution_excludepatterns = [\n 'quick_start.ipynb', # <-- times out\n 'transfer_learning.ipynb', # <-- transformers requires flax<=0.7.0\n 'flax/experimental/nnx', # exclude nnx\n]\n# raise exceptions on execution so CI can catch errors\nnb_execution_allow_errors = False\nnb_execution_raise_on_error = True\n\n# -- Extension configuration -------------------------------------------------\n\n# Tell sphinx-autodoc-typehints to generate stub parameter annotations including\n# types, even if the parameters aren't explicitly documented.\nalways_document_param_types = True\n\n# -- doctest configuration -------------------------------------------------\ndoctest_global_setup = \"\"\"\nimport jax\nimport jax.numpy as jnp\nfrom flax.experimental import nnx\n\nimport logging as slog\nfrom absl import logging as alog\n\n# Avoid certain absl logging messages to break doctest\nfiltered_message = [\n 'SaveArgs.aggregate is deprecated',\n '',\n]\n\nclass _CustomLogFilter(slog.Formatter):\n def format(self, record):\n message = super(_CustomLogFilter, self).format(record)\n for m in filtered_message:\n if m in message:\n return ''\n return message\n\nalog.use_absl_handler()\nalog.get_absl_handler().setFormatter(_CustomLogFilter())\n\"\"\"\n", "path": "docs/conf.py"}]}
| 2,696 | 112 |
gh_patches_debug_17591 | rasdani/github-patches | git_diff | yt-project__yt-3423 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Weird styling in docs navbar
### Bug report
**Bug summary**
Since 3.6.1, there's a weird offset (or lack of it) for first item in the docs main navbar. See below:

</issue>
<code>
[start of doc/source/conf.py]
1 #
2 # yt documentation build configuration file, created by
3 # sphinx-quickstart on Tue Jan 11 09:46:53 2011.
4 #
5 # This file is execfile()d with the current directory set to its containing dir.
6 #
7 # Note that not all possible configuration values are present in this
8 # autogenerated file.
9 #
10 # All configuration values have a default; values that are commented out
11 # serve to show the default.
12
13 import glob
14 import os
15 import sys
16
17 import sphinx_bootstrap_theme
18
19 on_rtd = os.environ.get("READTHEDOCS", None) == "True"
20
21 # If extensions (or modules to document with autodoc) are in another directory,
22 # add these directories to sys.path here. If the directory is relative to the
23 # documentation root, use os.path.abspath to make it absolute, like shown here.
24 sys.path.insert(0, os.path.abspath("../extensions/"))
25
26 # -- General configuration -----------------------------------------------------
27
28 # If your documentation needs a minimal Sphinx version, state it here.
29 # needs_sphinx = '1.0'
30
31 # Add any Sphinx extension module names here, as strings. They can be extensions
32 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
33 extensions = [
34 "sphinx.ext.autodoc",
35 "sphinx.ext.intersphinx",
36 "sphinx.ext.mathjax",
37 "sphinx.ext.viewcode",
38 "sphinx.ext.napoleon",
39 "yt_cookbook",
40 "yt_colormaps",
41 "config_help",
42 "yt_showfields",
43 ]
44
45 if not on_rtd:
46 extensions.append("sphinx.ext.autosummary")
47 extensions.append("pythonscript_sphinxext")
48
49 try:
50 import nbconvert # noqa: F401
51 import RunNotebook # noqa: F401
52
53 if not on_rtd:
54 extensions.append("RunNotebook.notebook_sphinxext")
55 extensions.append("RunNotebook.notebookcell_sphinxext")
56 except ImportError:
57 pass
58
59 # Add any paths that contain templates here, relative to this directory.
60 templates_path = ["_templates"]
61
62 # The suffix of source filenames.
63 source_suffix = ".rst"
64
65 # The encoding of source files.
66 # source_encoding = 'utf-8-sig'
67
68 # The master toctree document.
69 master_doc = "index"
70
71 # General information about the project.
72 project = "The yt Project"
73 copyright = "2013-2020, the yt Project"
74
75 # The version info for the project you're documenting, acts as replacement for
76 # |version| and |release|, also used in various other places throughout the
77 # built documents.
78 #
79 # The short X.Y version.
80 version = "4.1-dev"
81 # The full version, including alpha/beta/rc tags.
82 release = "4.1-dev"
83
84 # The language for content autogenerated by Sphinx. Refer to documentation
85 # for a list of supported languages.
86 # language = None
87
88 # There are two options for replacing |today|: either, you set today to some
89 # non-false value, then it is used:
90 # today = ''
91 # Else, today_fmt is used as the format for a strftime call.
92 # today_fmt = '%B %d, %Y'
93
94 # List of patterns, relative to source directory, that match files and
95 # directories to ignore when looking for source files.
96 exclude_patterns = []
97
98 # The reST default role (used for this markup: `text`) to use for all documents.
99 # default_role = None
100
101 # If true, '()' will be appended to :func: etc. cross-reference text.
102 # add_function_parentheses = True
103
104 # If true, the current module name will be prepended to all description
105 # unit titles (such as .. function::).
106 # add_module_names = True
107
108 # If true, sectionauthor and moduleauthor directives will be shown in the
109 # output. They are ignored by default.
110 show_authors = False
111
112 # The name of the Pygments (syntax highlighting) style to use.
113 pygments_style = "sphinx"
114
115 # A list of ignored prefixes for module index sorting.
116 # modindex_common_prefix = []
117
118
119 # -- Options for HTML output ---------------------------------------------------
120
121 # The theme to use for HTML and HTML Help pages. See the documentation for
122 # a list of builtin themes.
123 html_theme = "bootstrap"
124 html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
125
126 # Theme options are theme-specific and customize the look and feel of a theme
127 # further. For a list of options available for each theme, see the
128 # documentation.
129 html_theme_options = dict(
130 bootstrap_version="3",
131 bootswatch_theme="readable",
132 navbar_links=[
133 ("How to get help", "help/index"),
134 ("Quickstart notebooks", "quickstart/index"),
135 ("Cookbook", "cookbook/index"),
136 ],
137 navbar_sidebarrel=False,
138 globaltoc_depth=2,
139 )
140
141 # Add any paths that contain custom themes here, relative to this directory.
142 # html_theme_path = []
143
144 # The name for this set of Sphinx documents. If None, it defaults to
145 # "<project> v<release> documentation".
146 # html_title = None
147
148 # A shorter title for the navigation bar. Default is the same as html_title.
149 # html_short_title = None
150
151 # The name of an image file (relative to this directory) to place at the top
152 # of the sidebar.
153 html_logo = "_static/yt_icon.png"
154
155 # The name of an image file (within the static path) to use as favicon of the
156 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
157 # pixels large.
158 # html_favicon = None
159
160 # Add any paths that contain custom static files (such as style sheets) here,
161 # relative to this directory. They are copied after the builtin static files,
162 # so a file named "default.css" will overwrite the builtin "default.css".
163 html_static_path = ["_static", "analyzing/_static"]
164
165 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
166 # using the given strftime format.
167 # html_last_updated_fmt = '%b %d, %Y'
168
169 # If true, SmartyPants will be used to convert quotes and dashes to
170 # typographically correct entities.
171 # html_use_smartypants = True
172
173 # Custom sidebar templates, maps document names to template names.
174 # html_sidebars = {}
175
176 # Additional templates that should be rendered to pages, maps page names to
177 # template names.
178 # html_additional_pages = {}
179
180 # If false, no module index is generated.
181 html_domain_indices = False
182
183 # If false, no index is generated.
184 html_use_index = True
185
186 # If true, the index is split into individual pages for each letter.
187 # html_split_index = False
188
189 # If true, links to the reST sources are added to the pages.
190 html_show_sourcelink = False
191
192 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
193 # html_show_sphinx = True
194
195 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
196 # html_show_copyright = True
197
198 # If true, an OpenSearch description file will be output, and all pages will
199 # contain a <link> tag referring to it. The value of this option must be the
200 # base URL from which the finished HTML is served.
201 # html_use_opensearch = ''
202
203 # This is the file name suffix for HTML files (e.g. ".xhtml").
204 # html_file_suffix = None
205
206 # Output file base name for HTML help builder.
207 htmlhelp_basename = "ytdoc"
208
209
210 # -- Options for LaTeX output --------------------------------------------------
211
212 # The paper size ('letter' or 'a4').
213 # latex_paper_size = 'letter'
214
215 # The font size ('10pt', '11pt' or '12pt').
216 # latex_font_size = '10pt'
217
218 # Grouping the document tree into LaTeX files. List of tuples
219 # (source start file, target name, title, author, documentclass [howto/manual]).
220 latex_documents = [
221 ("index", "yt.tex", "yt Documentation", "The yt Project", "manual"),
222 ]
223
224 # The name of an image file (relative to this directory) to place at the top of
225 # the title page.
226 # latex_logo = None
227
228 # For "manual" documents, if this is true, then toplevel headings are parts,
229 # not chapters.
230 # latex_use_parts = False
231
232 # If true, show page references after internal links.
233 # latex_show_pagerefs = False
234
235 # If true, show URL addresses after external links.
236 # latex_show_urls = False
237
238 # Documents to append as an appendix to all manuals.
239 # latex_appendices = []
240
241 # If false, no module index is generated.
242 # latex_domain_indices = True
243
244
245 # -- Options for manual page output --------------------------------------------
246
247 # One entry per manual page. List of tuples
248 # (source start file, name, description, authors, manual section).
249 man_pages = [("index", "yt", "yt Documentation", ["The yt Project"], 1)]
250
251
252 # Example configuration for intersphinx: refer to the Python standard library.
253 intersphinx_mapping = {
254 "https://docs.python.org/3/": None,
255 "https://ipython.readthedocs.io/en/stable/": None,
256 "https://numpy.org/doc/stable/": None,
257 "https://matplotlib.org/stable/": None,
258 "https://docs.astropy.org/en/stable": None,
259 "https://pandas.pydata.org/pandas-docs/stable": None,
260 "trident": ("https://trident.readthedocs.io/en/latest/", None),
261 "yt_astro_analysis": ("https://yt-astro-analysis.readthedocs.io/en/latest/", None),
262 "yt_attic": ("https://yt-attic.readthedocs.io/en/latest/", None),
263 }
264
265 if not on_rtd:
266 autosummary_generate = glob.glob("reference/api/api.rst")
267
268 # as of Sphinx 3.1.2 this is the supported way to link custom style sheets
269 def setup(app):
270 app.add_css_file("custom.css")
271
[end of doc/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -70,7 +70,7 @@
# General information about the project.
project = "The yt Project"
-copyright = "2013-2020, the yt Project"
+copyright = "2013-2021, the yt Project"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -130,6 +130,7 @@
bootstrap_version="3",
bootswatch_theme="readable",
navbar_links=[
+ ("", ""), # see https://github.com/yt-project/yt/pull/3423
("How to get help", "help/index"),
("Quickstart notebooks", "quickstart/index"),
("Cookbook", "cookbook/index"),
|
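Besides bumping the copyright year, the substantive change above prepends an empty `("", "")` placeholder to `navbar_links`, which appears to absorb the stray first-item styling from sphinx-bootstrap-theme shown in the screenshot. Reconstructed from the diff and the original `doc/source/conf.py`, the patched theme options read:

```python
html_theme_options = dict(
    bootstrap_version="3",
    bootswatch_theme="readable",
    navbar_links=[
        ("", ""),  # empty dummy entry; see https://github.com/yt-project/yt/pull/3423
        ("How to get help", "help/index"),
        ("Quickstart notebooks", "quickstart/index"),
        ("Cookbook", "cookbook/index"),
    ],
    navbar_sidebarrel=False,
    globaltoc_depth=2,
)
```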
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -70,7 +70,7 @@\n \n # General information about the project.\n project = \"The yt Project\"\n-copyright = \"2013-2020, the yt Project\"\n+copyright = \"2013-2021, the yt Project\"\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n@@ -130,6 +130,7 @@\n bootstrap_version=\"3\",\n bootswatch_theme=\"readable\",\n navbar_links=[\n+ (\"\", \"\"), # see https://github.com/yt-project/yt/pull/3423\n (\"How to get help\", \"help/index\"),\n (\"Quickstart notebooks\", \"quickstart/index\"),\n (\"Cookbook\", \"cookbook/index\"),\n", "issue": "Weird styling in docs navbar\n### Bug report\r\n\r\n**Bug summary**\r\n\r\nSince 3.6.1, there's a weird offset (or lack of it) for first item in the docs main navbar. See below:\r\n\r\n\r\n\n", "before_files": [{"content": "#\n# yt documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 11 09:46:53 2011.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport glob\nimport os\nimport sys\n\nimport sphinx_bootstrap_theme\n\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"../extensions/\"))\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.napoleon\",\n \"yt_cookbook\",\n \"yt_colormaps\",\n \"config_help\",\n \"yt_showfields\",\n]\n\nif not on_rtd:\n extensions.append(\"sphinx.ext.autosummary\")\n extensions.append(\"pythonscript_sphinxext\")\n\ntry:\n import nbconvert # noqa: F401\n import RunNotebook # noqa: F401\n\n if not on_rtd:\n extensions.append(\"RunNotebook.notebook_sphinxext\")\n extensions.append(\"RunNotebook.notebookcell_sphinxext\")\nexcept ImportError:\n pass\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"The yt Project\"\ncopyright = \"2013-2020, the yt Project\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"4.1-dev\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"4.1-dev\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\nshow_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"bootstrap\"\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = dict(\n bootstrap_version=\"3\",\n bootswatch_theme=\"readable\",\n navbar_links=[\n (\"How to get help\", \"help/index\"),\n (\"Quickstart notebooks\", \"quickstart/index\"),\n (\"Cookbook\", \"cookbook/index\"),\n ],\n navbar_sidebarrel=False,\n globaltoc_depth=2,\n)\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/yt_icon.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"analyzing/_static\"]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ytdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n (\"index\", \"yt.tex\", \"yt Documentation\", \"The yt Project\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"yt\", \"yt Documentation\", [\"The yt Project\"], 1)]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/3/\": None,\n \"https://ipython.readthedocs.io/en/stable/\": None,\n \"https://numpy.org/doc/stable/\": None,\n \"https://matplotlib.org/stable/\": None,\n \"https://docs.astropy.org/en/stable\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n \"trident\": (\"https://trident.readthedocs.io/en/latest/\", None),\n \"yt_astro_analysis\": (\"https://yt-astro-analysis.readthedocs.io/en/latest/\", None),\n \"yt_attic\": (\"https://yt-attic.readthedocs.io/en/latest/\", None),\n}\n\nif not on_rtd:\n autosummary_generate = glob.glob(\"reference/api/api.rst\")\n\n# as of Sphinx 3.1.2 this is the supported way to link custom style sheets\ndef setup(app):\n app.add_css_file(\"custom.css\")\n", "path": "doc/source/conf.py"}]}
| 3,574 | 216 |
gh_patches_debug_13220 | rasdani/github-patches | git_diff | Textualize__textual-3396 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pilot.click can't use `Screen` as selector.
If you try something like `pilot.click(Screen)`, you get a `NoMatches` exception from the query.
</issue>
<code>
[start of src/textual/pilot.py]
1 """
2
3 The pilot object is used by [App.run_test][textual.app.App.run_test] to programmatically operate an app.
4
5 See the guide on how to [test Textual apps](/guide/testing).
6
7 """
8
9 from __future__ import annotations
10
11 import asyncio
12 from typing import Any, Generic
13
14 import rich.repr
15
16 from ._wait import wait_for_idle
17 from .app import App, ReturnType
18 from .events import Click, MouseDown, MouseMove, MouseUp
19 from .widget import Widget
20
21
22 def _get_mouse_message_arguments(
23 target: Widget,
24 offset: tuple[int, int] = (0, 0),
25 button: int = 0,
26 shift: bool = False,
27 meta: bool = False,
28 control: bool = False,
29 ) -> dict[str, Any]:
30 """Get the arguments to pass into mouse messages for the click and hover methods."""
31 click_x, click_y = target.region.offset + offset
32 message_arguments = {
33 "x": click_x,
34 "y": click_y,
35 "delta_x": 0,
36 "delta_y": 0,
37 "button": button,
38 "shift": shift,
39 "meta": meta,
40 "ctrl": control,
41 "screen_x": click_x,
42 "screen_y": click_y,
43 }
44 return message_arguments
45
46
47 class WaitForScreenTimeout(Exception):
48 """Exception raised if messages aren't being processed quickly enough.
49
50 If this occurs, the most likely explanation is some kind of deadlock in the app code.
51 """
52
53
54 @rich.repr.auto(angular=True)
55 class Pilot(Generic[ReturnType]):
56 """Pilot object to drive an app."""
57
58 def __init__(self, app: App[ReturnType]) -> None:
59 self._app = app
60
61 def __rich_repr__(self) -> rich.repr.Result:
62 yield "app", self._app
63
64 @property
65 def app(self) -> App[ReturnType]:
66 """App: A reference to the application."""
67 return self._app
68
69 async def press(self, *keys: str) -> None:
70 """Simulate key-presses.
71
72 Args:
73 *keys: Keys to press.
74 """
75 if keys:
76 await self._app._press_keys(keys)
77 await self._wait_for_screen()
78
79 async def click(
80 self,
81 selector: type[Widget] | str | None = None,
82 offset: tuple[int, int] = (0, 0),
83 shift: bool = False,
84 meta: bool = False,
85 control: bool = False,
86 ) -> None:
87 """Simulate clicking with the mouse.
88
89 Args:
90 selector: The widget that should be clicked. If None, then the click
91 will occur relative to the screen. Note that this simply causes
92 a click to occur at the location of the widget. If the widget is
93 currently hidden or obscured by another widget, then the click may
94 not land on it.
95 offset: The offset to click within the selected widget.
96 shift: Click with the shift key held down.
97 meta: Click with the meta key held down.
98 control: Click with the control key held down.
99 """
100 app = self.app
101 screen = app.screen
102 if selector is not None:
103 target_widget = screen.query_one(selector)
104 else:
105 target_widget = screen
106
107 message_arguments = _get_mouse_message_arguments(
108 target_widget, offset, button=1, shift=shift, meta=meta, control=control
109 )
110 app.post_message(MouseDown(**message_arguments))
111 await self.pause(0.1)
112 app.post_message(MouseUp(**message_arguments))
113 await self.pause(0.1)
114 app.post_message(Click(**message_arguments))
115 await self.pause(0.1)
116
117 async def hover(
118 self,
119 selector: type[Widget] | str | None | None = None,
120 offset: tuple[int, int] = (0, 0),
121 ) -> None:
122 """Simulate hovering with the mouse cursor.
123
124 Args:
125 selector: The widget that should be hovered. If None, then the click
126 will occur relative to the screen. Note that this simply causes
127 a hover to occur at the location of the widget. If the widget is
128 currently hidden or obscured by another widget, then the hover may
129 not land on it.
130 offset: The offset to hover over within the selected widget.
131 """
132 app = self.app
133 screen = app.screen
134 if selector is not None:
135 target_widget = screen.query_one(selector)
136 else:
137 target_widget = screen
138
139 message_arguments = _get_mouse_message_arguments(
140 target_widget, offset, button=0
141 )
142 await self.pause()
143 app.post_message(MouseMove(**message_arguments))
144 await self.pause()
145
146 async def _wait_for_screen(self, timeout: float = 30.0) -> bool:
147 """Wait for the current screen and its children to have processed all pending events.
148
149 Args:
150 timeout: A timeout in seconds to wait.
151
152 Returns:
153 `True` if all events were processed. `False` if an exception occurred,
154 meaning that not all events could be processed.
155
156 Raises:
157 WaitForScreenTimeout: If the screen and its children didn't finish processing within the timeout.
158 """
159 children = [self.app, *self.app.screen.walk_children(with_self=True)]
160 count = 0
161 count_zero_event = asyncio.Event()
162
163 def decrement_counter() -> None:
164 """Decrement internal counter, and set an event if it reaches zero."""
165 nonlocal count
166 count -= 1
167 if count == 0:
168 # When count is zero, all messages queued at the start of the method have been processed
169 count_zero_event.set()
170
171 # Increase the count for every successful call_later
172 for child in children:
173 if child.call_later(decrement_counter):
174 count += 1
175
176 if count:
177 # Wait for the count to return to zero, or a timeout, or an exception
178 wait_for = [
179 asyncio.create_task(count_zero_event.wait()),
180 asyncio.create_task(self.app._exception_event.wait()),
181 ]
182 _, pending = await asyncio.wait(
183 wait_for,
184 timeout=timeout,
185 return_when=asyncio.FIRST_COMPLETED,
186 )
187
188 for task in pending:
189 task.cancel()
190
191 timed_out = len(wait_for) == len(pending)
192 if timed_out:
193 raise WaitForScreenTimeout(
194 "Timed out while waiting for widgets to process pending messages."
195 )
196
197 # We've either timed out, encountered an exception, or we've finished
198 # decrementing all the counters (all events processed in children).
199 if count > 0:
200 return False
201
202 return True
203
204 async def pause(self, delay: float | None = None) -> None:
205 """Insert a pause.
206
207 Args:
208 delay: Seconds to pause, or None to wait for cpu idle.
209 """
210 # These sleep zeros, are to force asyncio to give up a time-slice.
211 await self._wait_for_screen()
212 if delay is None:
213 await wait_for_idle(0)
214 else:
215 await asyncio.sleep(delay)
216 self.app.screen._on_timer_update()
217
218 async def wait_for_animation(self) -> None:
219 """Wait for any current animation to complete."""
220 await self._app.animator.wait_for_idle()
221 self.app.screen._on_timer_update()
222
223 async def wait_for_scheduled_animations(self) -> None:
224 """Wait for any current and scheduled animations to complete."""
225 await self._wait_for_screen()
226 await self._app.animator.wait_until_complete()
227 await self._wait_for_screen()
228 await wait_for_idle()
229 self.app.screen._on_timer_update()
230
231 async def exit(self, result: ReturnType) -> None:
232 """Exit the app with the given result.
233
234 Args:
235 result: The app result returned by `run` or `run_async`.
236 """
237 await self._wait_for_screen()
238 await wait_for_idle()
239 self.app.exit(result)
240
[end of src/textual/pilot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/textual/pilot.py b/src/textual/pilot.py
--- a/src/textual/pilot.py
+++ b/src/textual/pilot.py
@@ -100,7 +100,7 @@
app = self.app
screen = app.screen
if selector is not None:
- target_widget = screen.query_one(selector)
+ target_widget = app.query_one(selector)
else:
target_widget = screen
@@ -132,7 +132,7 @@
app = self.app
screen = app.screen
if selector is not None:
- target_widget = screen.query_one(selector)
+ target_widget = app.query_one(selector)
else:
target_widget = screen
|
{"golden_diff": "diff --git a/src/textual/pilot.py b/src/textual/pilot.py\n--- a/src/textual/pilot.py\n+++ b/src/textual/pilot.py\n@@ -100,7 +100,7 @@\n app = self.app\n screen = app.screen\n if selector is not None:\n- target_widget = screen.query_one(selector)\n+ target_widget = app.query_one(selector)\n else:\n target_widget = screen\n \n@@ -132,7 +132,7 @@\n app = self.app\n screen = app.screen\n if selector is not None:\n- target_widget = screen.query_one(selector)\n+ target_widget = app.query_one(selector)\n else:\n target_widget = screen\n", "issue": "Pilot.click can't use `Screen` as selector.\nIf you try something like `pilot.click(Screen)`, you get a `NoMatches` exception from the query.\nPilot.click can't use `Screen` as selector.\nIf you try something like `pilot.click(Screen)`, you get a `NoMatches` exception from the query.\n", "before_files": [{"content": "\"\"\"\n\nThe pilot object is used by [App.run_test][textual.app.App.run_test] to programmatically operate an app.\n\nSee the guide on how to [test Textual apps](/guide/testing).\n\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nfrom typing import Any, Generic\n\nimport rich.repr\n\nfrom ._wait import wait_for_idle\nfrom .app import App, ReturnType\nfrom .events import Click, MouseDown, MouseMove, MouseUp\nfrom .widget import Widget\n\n\ndef _get_mouse_message_arguments(\n target: Widget,\n offset: tuple[int, int] = (0, 0),\n button: int = 0,\n shift: bool = False,\n meta: bool = False,\n control: bool = False,\n) -> dict[str, Any]:\n \"\"\"Get the arguments to pass into mouse messages for the click and hover methods.\"\"\"\n click_x, click_y = target.region.offset + offset\n message_arguments = {\n \"x\": click_x,\n \"y\": click_y,\n \"delta_x\": 0,\n \"delta_y\": 0,\n \"button\": button,\n \"shift\": shift,\n \"meta\": meta,\n \"ctrl\": control,\n \"screen_x\": click_x,\n \"screen_y\": click_y,\n }\n return message_arguments\n\n\nclass WaitForScreenTimeout(Exception):\n \"\"\"Exception raised if messages aren't being processed quickly enough.\n\n If this occurs, the most likely explanation is some kind of deadlock in the app code.\n \"\"\"\n\n\[email protected](angular=True)\nclass Pilot(Generic[ReturnType]):\n \"\"\"Pilot object to drive an app.\"\"\"\n\n def __init__(self, app: App[ReturnType]) -> None:\n self._app = app\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield \"app\", self._app\n\n @property\n def app(self) -> App[ReturnType]:\n \"\"\"App: A reference to the application.\"\"\"\n return self._app\n\n async def press(self, *keys: str) -> None:\n \"\"\"Simulate key-presses.\n\n Args:\n *keys: Keys to press.\n \"\"\"\n if keys:\n await self._app._press_keys(keys)\n await self._wait_for_screen()\n\n async def click(\n self,\n selector: type[Widget] | str | None = None,\n offset: tuple[int, int] = (0, 0),\n shift: bool = False,\n meta: bool = False,\n control: bool = False,\n ) -> None:\n \"\"\"Simulate clicking with the mouse.\n\n Args:\n selector: The widget that should be clicked. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a click to occur at the location of the widget. 
If the widget is\n currently hidden or obscured by another widget, then the click may\n not land on it.\n offset: The offset to click within the selected widget.\n shift: Click with the shift key held down.\n meta: Click with the meta key held down.\n control: Click with the control key held down.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = screen.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=1, shift=shift, meta=meta, control=control\n )\n app.post_message(MouseDown(**message_arguments))\n await self.pause(0.1)\n app.post_message(MouseUp(**message_arguments))\n await self.pause(0.1)\n app.post_message(Click(**message_arguments))\n await self.pause(0.1)\n\n async def hover(\n self,\n selector: type[Widget] | str | None | None = None,\n offset: tuple[int, int] = (0, 0),\n ) -> None:\n \"\"\"Simulate hovering with the mouse cursor.\n\n Args:\n selector: The widget that should be hovered. If None, then the click\n will occur relative to the screen. Note that this simply causes\n a hover to occur at the location of the widget. If the widget is\n currently hidden or obscured by another widget, then the hover may\n not land on it.\n offset: The offset to hover over within the selected widget.\n \"\"\"\n app = self.app\n screen = app.screen\n if selector is not None:\n target_widget = screen.query_one(selector)\n else:\n target_widget = screen\n\n message_arguments = _get_mouse_message_arguments(\n target_widget, offset, button=0\n )\n await self.pause()\n app.post_message(MouseMove(**message_arguments))\n await self.pause()\n\n async def _wait_for_screen(self, timeout: float = 30.0) -> bool:\n \"\"\"Wait for the current screen and its children to have processed all pending events.\n\n Args:\n timeout: A timeout in seconds to wait.\n\n Returns:\n `True` if all events were processed. 
`False` if an exception occurred,\n meaning that not all events could be processed.\n\n Raises:\n WaitForScreenTimeout: If the screen and its children didn't finish processing within the timeout.\n \"\"\"\n children = [self.app, *self.app.screen.walk_children(with_self=True)]\n count = 0\n count_zero_event = asyncio.Event()\n\n def decrement_counter() -> None:\n \"\"\"Decrement internal counter, and set an event if it reaches zero.\"\"\"\n nonlocal count\n count -= 1\n if count == 0:\n # When count is zero, all messages queued at the start of the method have been processed\n count_zero_event.set()\n\n # Increase the count for every successful call_later\n for child in children:\n if child.call_later(decrement_counter):\n count += 1\n\n if count:\n # Wait for the count to return to zero, or a timeout, or an exception\n wait_for = [\n asyncio.create_task(count_zero_event.wait()),\n asyncio.create_task(self.app._exception_event.wait()),\n ]\n _, pending = await asyncio.wait(\n wait_for,\n timeout=timeout,\n return_when=asyncio.FIRST_COMPLETED,\n )\n\n for task in pending:\n task.cancel()\n\n timed_out = len(wait_for) == len(pending)\n if timed_out:\n raise WaitForScreenTimeout(\n \"Timed out while waiting for widgets to process pending messages.\"\n )\n\n # We've either timed out, encountered an exception, or we've finished\n # decrementing all the counters (all events processed in children).\n if count > 0:\n return False\n\n return True\n\n async def pause(self, delay: float | None = None) -> None:\n \"\"\"Insert a pause.\n\n Args:\n delay: Seconds to pause, or None to wait for cpu idle.\n \"\"\"\n # These sleep zeros, are to force asyncio to give up a time-slice.\n await self._wait_for_screen()\n if delay is None:\n await wait_for_idle(0)\n else:\n await asyncio.sleep(delay)\n self.app.screen._on_timer_update()\n\n async def wait_for_animation(self) -> None:\n \"\"\"Wait for any current animation to complete.\"\"\"\n await self._app.animator.wait_for_idle()\n self.app.screen._on_timer_update()\n\n async def wait_for_scheduled_animations(self) -> None:\n \"\"\"Wait for any current and scheduled animations to complete.\"\"\"\n await self._wait_for_screen()\n await self._app.animator.wait_until_complete()\n await self._wait_for_screen()\n await wait_for_idle()\n self.app.screen._on_timer_update()\n\n async def exit(self, result: ReturnType) -> None:\n \"\"\"Exit the app with the given result.\n\n Args:\n result: The app result returned by `run` or `run_async`.\n \"\"\"\n await self._wait_for_screen()\n await wait_for_idle()\n self.app.exit(result)\n", "path": "src/textual/pilot.py"}]}
| 2,966 | 160 |
gh_patches_debug_624
|
rasdani/github-patches
|
git_diff
|
codespell-project__codespell-89
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Makefile is broken
The makefile is no longer working, since there is no longer a codespell.py
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2
3 # adapted from mne-python
4
5 import os
6 from os import path as op
7
8 try:
9 import setuptools # noqa to allow --develop
10 except Exception:
11 pass
12 from distutils.core import setup
13
14 from codespell_lib import __version__
15
16 DISTNAME = 'codespell'
17 DESCRIPTION = """Codespell"""
18 MAINTAINER = 'Lucas De Marchi'
19 MAINTAINER_EMAIL = '[email protected]'
20 URL = 'https://github.com/lucasdemarchi/codespell/'
21 LICENSE = 'GPL v2'
22 DOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'
23 with open('README.rst', 'r') as f:
24 LONG_DESCRIPTION = f.read()
25
26 if __name__ == "__main__":
27 if os.path.exists('MANIFEST'):
28 os.remove('MANIFEST')
29
30 setup(name=DISTNAME,
31 maintainer=MAINTAINER,
32 include_package_data=True,
33 maintainer_email=MAINTAINER_EMAIL,
34 description=DESCRIPTION,
35 license=LICENSE,
36 url=URL,
37 version=__version__,
38 download_url=DOWNLOAD_URL,
39 long_description=LONG_DESCRIPTION,
40 zip_safe=False,
41 classifiers=['Intended Audience :: Developers',
42 'License :: OSI Approved',
43 'Programming Language :: Python',
44 'Topic :: Software Development',
45 'Operating System :: Microsoft :: Windows',
46 'Operating System :: POSIX',
47 'Operating System :: Unix',
48 'Operating System :: MacOS'],
49 platforms='any',
50 packages=[
51 'codespell_lib', 'codespell_lib.tests',
52 'codespell_lib.data',
53 ],
54 package_data={'codespell_lib': [
55 op.join('data', 'dictionary.txt'),
56 op.join('data', 'linux-kernel.exclude'),
57 ]},
58 scripts=['bin/codespell.py'])
59
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,4 +55,4 @@
op.join('data', 'dictionary.txt'),
op.join('data', 'linux-kernel.exclude'),
]},
- scripts=['bin/codespell.py'])
+ scripts=['bin/codespell'])
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,4 +55,4 @@\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n- scripts=['bin/codespell.py'])\n+ scripts=['bin/codespell'])\n", "issue": "Makefile is broken\nThe makefile is no longer working, since there is no longer a codespell.py\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\n# adapted from mne-python\n\nimport os\nfrom os import path as op\n\ntry:\n import setuptools # noqa to allow --develop\nexcept Exception:\n pass\nfrom distutils.core import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/lucasdemarchi/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n packages=[\n 'codespell_lib', 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n scripts=['bin/codespell.py'])\n", "path": "setup.py"}]}
| 1,052 | 76 |
gh_patches_debug_28536
|
rasdani/github-patches
|
git_diff
|
systemd__mkosi-499
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include man page in Python package
When building the man page with
```
pandoc -t man -o mkosi.1 mkosi.md
```
the output looks rather unpleasing:
```
mkosi ‐ Build Legacy‐Free OS Images mkosi [options...] build
mkosi [options...] clean mkosi [options...] summary mkosi [op‐
tions...] shell [command line...] mkosi [options...] boot [com‐
mand line...] mkosi [options...] qemu mkosi is a tool for easily
building legacy‐free OS images. It’s a fancy wrapper around dnf
‐‐installroot, debootstrap, pacstrap and zypper that may generate
disk images with a number of bells and whistles. The following
output formats are supported: Raw GPT disk image, with ext4 as
[...]
```
We should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.
</issue>
<code>
[start of mkosi/__main__.py]
1 # SPDX-License-Identifier: LGPL-2.1+
2 # PYTHON_ARGCOMPLETE_OK
3 import os
4 import sys
5
6 from . import parse_args, complete_step, run_verb, die, MkosiException
7
8
9 try:
10 args = parse_args()
11
12 for job_name, a in args.items():
13 # Change working directory if --directory is passed
14 if a.directory:
15 work_dir = a.directory
16 if os.path.isdir(work_dir):
17 os.chdir(work_dir)
18 else:
19 die(f"Error: {work_dir} is not a directory!")
20 with complete_step(f"Processing {job_name}"):
21 run_verb(a)
22 except MkosiException:
23 sys.exit(1)
24
[end of mkosi/__main__.py]
[start of setup.py]
1 #!/usr/bin/python3
2 # SPDX-License-Identifier: LGPL-2.1+
3
4 from setuptools import setup, Command
5
6 class BuildManpage(Command):
7 description = ('builds the manpage')
8 user_options = []
9
10 def initialize_options(self):
11 pass
12 def finalize_options(self):
13 pass
14
15 def run(self):
16 self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
17
18
19 setup(
20 name="mkosi",
21 version="5",
22 description="Create legacy-free OS images",
23 url="https://github.com/systemd/mkosi",
24 maintainer="mkosi contributors",
25 maintainer_email="[email protected]",
26 license="LGPLv2+",
27 python_requires=">=3.6",
28 packages = ["mkosi"],
29 cmdclass = { "man": BuildManpage },
30 entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
31 )
32
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mkosi/__main__.py b/mkosi/__main__.py
--- a/mkosi/__main__.py
+++ b/mkosi/__main__.py
@@ -6,18 +6,23 @@
from . import parse_args, complete_step, run_verb, die, MkosiException
-try:
- args = parse_args()
+def main() -> None:
+ try:
+ args = parse_args()
- for job_name, a in args.items():
- # Change working directory if --directory is passed
- if a.directory:
- work_dir = a.directory
- if os.path.isdir(work_dir):
- os.chdir(work_dir)
- else:
- die(f"Error: {work_dir} is not a directory!")
- with complete_step(f"Processing {job_name}"):
- run_verb(a)
-except MkosiException:
- sys.exit(1)
+ for job_name, a in args.items():
+ # Change working directory if --directory is passed
+ if a.directory:
+ work_dir = a.directory
+ if os.path.isdir(work_dir):
+ os.chdir(work_dir)
+ else:
+ die(f"Error: {work_dir} is not a directory!")
+ with complete_step(f"Processing {job_name}"):
+ run_verb(a)
+ except MkosiException:
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
pass
def run(self):
- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])
setup(
@@ -27,5 +27,6 @@
python_requires=">=3.6",
packages = ["mkosi"],
cmdclass = { "man": BuildManpage },
- entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
+ data_files = [('share/man/man1', ["man/mkosi.1"])],
+ entry_points = {"console_scripts": ["mkosi=mkosi.__main__:main"]},
)
|
{"golden_diff": "diff --git a/mkosi/__main__.py b/mkosi/__main__.py\n--- a/mkosi/__main__.py\n+++ b/mkosi/__main__.py\n@@ -6,18 +6,23 @@\n from . import parse_args, complete_step, run_verb, die, MkosiException\n \n \n-try:\n- args = parse_args()\n+def main() -> None:\n+ try:\n+ args = parse_args()\n \n- for job_name, a in args.items():\n- # Change working directory if --directory is passed\n- if a.directory:\n- work_dir = a.directory\n- if os.path.isdir(work_dir):\n- os.chdir(work_dir)\n- else:\n- die(f\"Error: {work_dir} is not a directory!\")\n- with complete_step(f\"Processing {job_name}\"):\n- run_verb(a)\n-except MkosiException:\n- sys.exit(1)\n+ for job_name, a in args.items():\n+ # Change working directory if --directory is passed\n+ if a.directory:\n+ work_dir = a.directory\n+ if os.path.isdir(work_dir):\n+ os.chdir(work_dir)\n+ else:\n+ die(f\"Error: {work_dir} is not a directory!\")\n+ with complete_step(f\"Processing {job_name}\"):\n+ run_verb(a)\n+ except MkosiException:\n+ sys.exit(1)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,7 +13,7 @@\n pass\n \n def run(self):\n- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])\n \n \n setup(\n@@ -27,5 +27,6 @@\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n- entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n+ data_files = [('share/man/man1', [\"man/mkosi.1\"])],\n+ entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__:main\"]},\n )\n", "issue": "Include man page in Python package\nWhen building the man page with\r\n```\r\npandoc -t man -o mkosi.1 mkosi.md\r\n```\r\nthe output looks rather unpleasing:\r\n```\r\nmkosi \u2010 Build Legacy\u2010Free OS Images mkosi [options...] build\r\nmkosi [options...] clean mkosi [options...] summary mkosi [op\u2010\r\ntions...] shell [command line...] mkosi [options...] boot [com\u2010\r\nmand line...] mkosi [options...] qemu mkosi is a tool for easily\r\nbuilding legacy\u2010free OS images. It\u2019s a fancy wrapper around dnf\r\n\u2010\u2010installroot, debootstrap, pacstrap and zypper that may generate\r\ndisk images with a number of bells and whistles. The following\r\noutput formats are supported: Raw GPT disk image, with ext4 as\r\n[...]\r\n```\r\n\r\nWe should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\nimport os\nimport sys\n\nfrom . 
import parse_args, complete_step, run_verb, die, MkosiException\n\n\ntry:\n args = parse_args()\n\n for job_name, a in args.items():\n # Change working directory if --directory is passed\n if a.directory:\n work_dir = a.directory\n if os.path.isdir(work_dir):\n os.chdir(work_dir)\n else:\n die(f\"Error: {work_dir} is not a directory!\")\n with complete_step(f\"Processing {job_name}\"):\n run_verb(a)\nexcept MkosiException:\n sys.exit(1)\n", "path": "mkosi/__main__.py"}, {"content": "#!/usr/bin/python3\n# SPDX-License-Identifier: LGPL-2.1+\n\nfrom setuptools import setup, Command\n\nclass BuildManpage(Command):\n description = ('builds the manpage')\n user_options = []\n\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n\n def run(self):\n self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n\n\nsetup(\n name=\"mkosi\",\n version=\"5\",\n description=\"Create legacy-free OS images\",\n url=\"https://github.com/systemd/mkosi\",\n maintainer=\"mkosi contributors\",\n maintainer_email=\"[email protected]\",\n license=\"LGPLv2+\",\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n)\n", "path": "setup.py"}]}
| 1,247 | 540 |
gh_patches_debug_419
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-8651
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
grid_axis_alignment_no_toolbar is broken in Windows (32-bit)
`examples/integration/layout/grid_axis_alignment_no_toolbar`:

All axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.
</issue>
<code>
[start of examples/integration/layout/grid_axis_alignment_no_toolbar.py]
1 from __future__ import absolute_import
2
3 import numpy as np
4
5 from bokeh.plotting import figure, save
6 from bokeh.layouts import gridplot
7
8 coeffs = [10**0, 10**3, 10**6, 10**9]
9 V = np.arange(10)
10
11 figs = []
12
13 for ycoeff in coeffs:
14 row = []
15 for xcoeff in coeffs:
16 fig = figure(plot_height=200, plot_width=200)
17 fig.xaxis[0].formatter.use_scientific = False
18 fig.yaxis[0].formatter.use_scientific = False
19 fig.xaxis[0].major_label_orientation = "vertical"
20 fig.yaxis[0].major_label_orientation = "horizontal"
21 fig.scatter(V*xcoeff, V*ycoeff)
22 row.append(fig)
23 figs.append(row)
24
25 grid = gridplot(figs, toolbar_location=None)
26
27 save(grid)
28
[end of examples/integration/layout/grid_axis_alignment_no_toolbar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py
+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
@@ -6,7 +6,7 @@
from bokeh.layouts import gridplot
coeffs = [10**0, 10**3, 10**6, 10**9]
-V = np.arange(10)
+V = np.arange(10, dtype="int64")
figs = []
|
{"golden_diff": "diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n@@ -6,7 +6,7 @@\n from bokeh.layouts import gridplot\n \n coeffs = [10**0, 10**3, 10**6, 10**9]\n-V = np.arange(10)\n+V = np.arange(10, dtype=\"int64\")\n \n figs = []\n", "issue": "grid_axis_alignment_no_toolbar is broken in Windows (32-bit)\n`examples/integration/layout/grid_axis_alignment_no_toolbar`:\r\n\r\n\r\n\r\nAll axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10)\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n", "path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"}]}
| 955 | 124 |
gh_patches_debug_17856
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-5636
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary SQL select for react home landing page
The React landing page view extracts the default filters. Every time but it's never used.
<img width="1502" alt="Screen Shot 2019-08-13 at 2 44 39 PM" src="https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png">
</issue>
<code>
[start of kuma/landing/views.py]
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django.http import HttpResponse
5 from django.shortcuts import redirect, render
6 from django.views import static
7 from django.views.decorators.cache import never_cache
8 from django.views.generic import RedirectView
9
10 from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
11 from kuma.core.utils import is_wiki
12 from kuma.feeder.models import Bundle
13 from kuma.feeder.sections import SECTION_HACKS
14 from kuma.search.models import Filter
15
16 from .utils import favicon_url
17
18
19 @shared_cache_control
20 def contribute_json(request):
21 return static.serve(request, 'contribute.json', document_root=settings.ROOT)
22
23
24 @shared_cache_control
25 def home(request):
26 """Home page."""
27 if is_wiki(request):
28 return render_home(request, 'landing/homepage.html')
29 return render_home(request, 'landing/react_homepage.html')
30
31
32 def render_home(request, template_name):
33 """Render the home page with the template named "template_name"."""
34 updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
35 default_filters = Filter.objects.default_filters()
36 context = {
37 'updates': updates,
38 'default_filters': default_filters,
39 }
40 return render(request, template_name, context)
41
42
43 @ensure_wiki_domain
44 @never_cache
45 def maintenance_mode(request):
46 if settings.MAINTENANCE_MODE:
47 return render(request, 'landing/maintenance-mode.html')
48 else:
49 return redirect('home')
50
51
52 @ensure_wiki_domain
53 @shared_cache_control
54 def promote_buttons(request):
55 """Bug 646192: MDN affiliate buttons"""
56 return render(request, 'landing/promote_buttons.html')
57
58
59 ROBOTS_ALLOWED_TXT = '''\
60 User-agent: *
61 Sitemap: https://developer.mozilla.org/sitemap.xml
62
63 Disallow: /admin/
64 Disallow: /api/
65 Disallow: /*/dashboards/*
66 Disallow: /*docs/feeds
67 Disallow: /*docs/templates
68 Disallow: /*docs*Template:
69 Disallow: /*docs/all
70 Disallow: /*docs/tag*
71 Disallow: /*docs/needs-review*
72 Disallow: /*docs/localization-tag*
73 Disallow: /*docs/with-errors
74 Disallow: /*docs/without-parent
75 Disallow: /*docs/top-level
76 Disallow: /*docs/new
77 Disallow: /*docs/get-documents
78 Disallow: /*docs/submit_akismet_spam
79 Disallow: /*docs/load*
80 Disallow: /*docs/Experiment:*
81 Disallow: /*$api
82 Disallow: /*$compare
83 Disallow: /*$revision
84 Disallow: /*$history
85 Disallow: /*$children
86 Disallow: /*$flag
87 Disallow: /*$locales
88 Disallow: /*$toc
89 Disallow: /*$move
90 Disallow: /*$quick-review
91 Disallow: /*$samples
92 Disallow: /*$revert
93 Disallow: /*$repair_breadcrumbs
94 Disallow: /*$delete
95 Disallow: /*$restore
96 Disallow: /*$purge
97 Disallow: /*$subscribe
98 Disallow: /*$subscribe_to_tree
99 Disallow: /*$vote
100 Disallow: /*docs.json
101 Disallow: /*docs/ckeditor_config.js
102 Disallow: /*/files/
103 Disallow: /media
104 Disallow: /*move-requested
105 Disallow: /*preview-wiki-content
106 Disallow: /*profiles*/edit
107 Disallow: /skins
108 Disallow: /*type=feed
109 Disallow: /*users/
110 ''' + '\n'.join('Disallow: /{locale}/search'.format(locale=locale)
111 for locale in settings.ENABLED_LOCALES)
112
113 ROBOTS_GO_AWAY_TXT = '''\
114 User-Agent: *
115 Disallow: /
116 '''
117
118
119 @shared_cache_control
120 def robots_txt(request):
121 """Serve robots.txt that allows or forbids robots."""
122 host = request.get_host()
123 if host in settings.ALLOW_ROBOTS_DOMAINS:
124 robots = ""
125 elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
126 robots = ROBOTS_ALLOWED_TXT
127 else:
128 robots = ROBOTS_GO_AWAY_TXT
129 return HttpResponse(robots, content_type='text/plain')
130
131
132 class FaviconRedirect(RedirectView):
133 """Redirect to the favicon in the static img folder (bug 1402497)"""
134
135 def get_redirect_url(self, *args, **kwargs):
136 return favicon_url()
137
[end of kuma/landing/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/landing/views.py b/kuma/landing/views.py
--- a/kuma/landing/views.py
+++ b/kuma/landing/views.py
@@ -24,19 +24,16 @@
@shared_cache_control
def home(request):
"""Home page."""
+ context = {}
+ # Need for both wiki and react homepage
+ context['updates'] = list(
+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
+
+ # The default template name
+ template_name = 'landing/react_homepage.html'
if is_wiki(request):
- return render_home(request, 'landing/homepage.html')
- return render_home(request, 'landing/react_homepage.html')
-
-
-def render_home(request, template_name):
- """Render the home page with the template named "template_name"."""
- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
- default_filters = Filter.objects.default_filters()
- context = {
- 'updates': updates,
- 'default_filters': default_filters,
- }
+ template_name = 'landing/homepage.html'
+ context['default_filters'] = Filter.objects.default_filters()
return render(request, template_name, context)
|
{"golden_diff": "diff --git a/kuma/landing/views.py b/kuma/landing/views.py\n--- a/kuma/landing/views.py\n+++ b/kuma/landing/views.py\n@@ -24,19 +24,16 @@\n @shared_cache_control\n def home(request):\n \"\"\"Home page.\"\"\"\n+ context = {}\n+ # Need for both wiki and react homepage\n+ context['updates'] = list(\n+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n+\n+ # The default template name\n+ template_name = 'landing/react_homepage.html'\n if is_wiki(request):\n- return render_home(request, 'landing/homepage.html')\n- return render_home(request, 'landing/react_homepage.html')\n-\n-\n-def render_home(request, template_name):\n- \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n- default_filters = Filter.objects.default_filters()\n- context = {\n- 'updates': updates,\n- 'default_filters': default_filters,\n- }\n+ template_name = 'landing/homepage.html'\n+ context['default_filters'] = Filter.objects.default_filters()\n return render(request, template_name, context)\n", "issue": "Unnecessary SQL select for react home landing page\nThe React landing page view extracts the default filters. Every time but it's never used. \r\n<img width=\"1502\" alt=\"Screen Shot 2019-08-13 at 2 44 39 PM\" src=\"https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png\">\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, 'contribute.json', document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n if is_wiki(request):\n return render_home(request, 'landing/homepage.html')\n return render_home(request, 'landing/react_homepage.html')\n\n\ndef render_home(request, template_name):\n \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n default_filters = Filter.objects.default_filters()\n context = {\n 'updates': updates,\n 'default_filters': default_filters,\n }\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, 'landing/maintenance-mode.html')\n else:\n return redirect('home')\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, 'landing/promote_buttons.html')\n\n\nROBOTS_ALLOWED_TXT = '''\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /admin/\nDisallow: /api/\nDisallow: /*/dashboards/*\nDisallow: /*docs/feeds\nDisallow: /*docs/templates\nDisallow: /*docs*Template:\nDisallow: /*docs/all\nDisallow: /*docs/tag*\nDisallow: /*docs/needs-review*\nDisallow: /*docs/localization-tag*\nDisallow: /*docs/with-errors\nDisallow: 
/*docs/without-parent\nDisallow: /*docs/top-level\nDisallow: /*docs/new\nDisallow: /*docs/get-documents\nDisallow: /*docs/submit_akismet_spam\nDisallow: /*docs/load*\nDisallow: /*docs/Experiment:*\nDisallow: /*$api\nDisallow: /*$compare\nDisallow: /*$revision\nDisallow: /*$history\nDisallow: /*$children\nDisallow: /*$flag\nDisallow: /*$locales\nDisallow: /*$toc\nDisallow: /*$move\nDisallow: /*$quick-review\nDisallow: /*$samples\nDisallow: /*$revert\nDisallow: /*$repair_breadcrumbs\nDisallow: /*$delete\nDisallow: /*$restore\nDisallow: /*$purge\nDisallow: /*$subscribe\nDisallow: /*$subscribe_to_tree\nDisallow: /*$vote\nDisallow: /*docs.json\nDisallow: /*docs/ckeditor_config.js\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*move-requested\nDisallow: /*preview-wiki-content\nDisallow: /*profiles*/edit\nDisallow: /skins\nDisallow: /*type=feed\nDisallow: /*users/\n''' + '\\n'.join('Disallow: /{locale}/search'.format(locale=locale)\n for locale in settings.ENABLED_LOCALES)\n\nROBOTS_GO_AWAY_TXT = '''\\\nUser-Agent: *\nDisallow: /\n'''\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type='text/plain')\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}]}
| 1,922 | 275 |
gh_patches_debug_1310
|
rasdani/github-patches
|
git_diff
|
hylang__hy-161
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LIST-COMP breaks with certain variable names
Try compiling:
```
(list-comp (, i j) (i [-1 0 1] j [-1 0 1]))
```
With hy and you'll get some strange errors. If you replace "i" and "j" with "x" and "y" respectively, the same piece of code works as expected.
</issue>
<code>
[start of hy/lex/states.py]
1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from hy.models.expression import HyExpression
22 from hy.models.integer import HyInteger
23 from hy.models.lambdalist import HyLambdaListKeyword
24 from hy.models.float import HyFloat
25 from hy.models.complex import HyComplex
26 from hy.models.symbol import HySymbol
27 from hy.models.string import HyString
28 from hy.models.keyword import HyKeyword
29 from hy.models.dict import HyDict
30 from hy.models.list import HyList
31
32 from hy.errors import HyError
33
34 from abc import ABCMeta, abstractmethod
35
36
37 WHITESPACE = [" ", "\t", "\n", "\r"]
38
39
40 class LexException(HyError):
41 """
42 Error during the Lexing of a Hython expression.
43 """
44 pass
45
46
47 def _resolve_atom(obj):
48 """
49 Resolve a bare atom into one of the following (in order):
50
51 - Integer
52 - LambdaListKeyword
53 - Float
54 - Complex
55 - Symbol
56 """
57 try:
58 return HyInteger(obj)
59 except ValueError:
60 pass
61
62 if obj.startswith("&"):
63 return HyLambdaListKeyword(obj)
64
65 try:
66 return HyFloat(obj)
67 except ValueError:
68 pass
69
70 try:
71 return HyComplex(obj)
72 except ValueError:
73 pass
74
75 table = {
76 "true": "True",
77 "false": "False",
78 "null": "None",
79 }
80
81 if obj in table:
82 return HySymbol(table[obj])
83
84 if obj.startswith(":"):
85 return HyKeyword(obj)
86
87 if obj.startswith("*") and obj.endswith("*") and obj not in ("*", "**"):
88 obj = obj[1:-1].upper()
89
90 if "-" in obj and obj != "-":
91 obj = obj.replace("-", "_")
92
93 return HySymbol(obj)
94
95
96 class State(object):
97 """
98 Generic State model.
99 """
100
101 __slots__ = ("nodes", "machine")
102 __metaclass__ = ABCMeta
103
104 def __init__(self, machine):
105 self.machine = machine
106
107 def _enter(self):
108 """ Internal shim for running global ``enter`` code """
109 self.result = None
110 self.nodes = []
111 self.enter()
112
113 def _exit(self):
114 """ Internal shim for running global ``exit`` code """
115 self.exit()
116
117 def enter(self):
118 """
119 Overridable ``enter`` routines. Subclasses may implement this.
120 """
121 pass
122
123 def exit(self):
124 """
125 Overridable ``exit`` routines. Subclasses may implement this.
126 """
127 pass
128
129 @abstractmethod
130 def process(self, char):
131 """
132 Overridable ``process`` routines. Subclasses must implement this to be
133 useful.
134 """
135 pass # ABC
136
137
138 class ListeyThing(State):
139
140 def enter(self):
141 self.buf = ""
142
143 def commit(self):
144 if self.buf != "":
145 ret = _resolve_atom(self.buf)
146 ret.start_line = self._start_line
147 ret.start_column = self._start_column
148 ret.end_line = self.machine.line
149 ret.end_column = (self.machine.column - 1)
150
151 self.nodes.append(ret)
152 self.buf = ""
153
154 def exit(self):
155 self.commit()
156 self.result = self.result_type(self.nodes)
157
158 def process(self, char):
159 if char == "(":
160 self.commit()
161 self.machine.sub(Expression)
162 return
163
164 if char == "{":
165 self.commit()
166 self.machine.sub(Dict)
167 return
168
169 if char == "[":
170 self.commit()
171 self.machine.sub(List)
172 return
173
174 if char == "\"":
175 self.commit()
176 self.machine.sub(String)
177 return
178
179 if char == ";":
180 self.commit()
181 self.machine.sub(Comment)
182 return
183
184 if char == self.end_char:
185 return Idle
186
187 if char in ")]}":
188 raise LexException("Unexpected closing character: `%s'" % (char))
189
190 if char in WHITESPACE:
191 self.commit()
192 return
193
194 if self.buf == "":
195 self._start_line = self.machine.line
196 self._start_column = self.machine.column
197
198 self.buf += char
199
200
201 class List(ListeyThing):
202 """
203 This state parses a Hy list (like a Clojure vector) for use in native
204 Python interop.
205
206 [foo 1 2 3 4] is a good example.
207 """
208
209 result_type = HyList
210 end_char = "]"
211
212
213 class Expression(ListeyThing):
214 """
215 This state parses a Hy expression (statement, to be evaluated at runtime)
216 for running things & stuff.
217 """
218
219 result_type = HyExpression
220 end_char = ")"
221
222
223 class Dict(ListeyThing):
224 """
225 This state parses a Hy dict for things.
226 """
227
228 def exit(self):
229 self.commit()
230 it = iter(self.nodes)
231 result = dict(zip(it, it))
232 self.result = HyDict(result)
233
234 end_char = "}"
235
236
237 class String(State):
238 """
239 String state. This will handle stuff like:
240
241 (println "foobar")
242 ^^^^^^^^ -- String
243 """
244
245 def enter(self):
246 self.escaped = False
247
248 def exit(self):
249 self.result = HyString("".join(self.nodes))
250
251 def process(self, char):
252 """
253 State transitions:
254
255 - " - Idle
256 """
257 if self.escaped:
258 self.escaped = False
259 if char == "n":
260 self.nodes.append("\n")
261 return
262 if char == "\\":
263 self.nodes.append("\\")
264 return
265 if char == "\"":
266 self.nodes.append("\"")
267 return
268
269 raise LexException("Unknown modifier: `%s'" % (char))
270
271 if char == "\"":
272 return Idle
273
274 if char == "\\":
275 self.escaped = True
276 return
277
278 self.nodes.append(char)
279
280
281 class Atom(State):
282 """
283 This state parses integer constants, boolean constants, and symbols
284 """
285
286 def __init__(self, machine):
287 State.__init__(self, machine)
288 self.initial_buf = ''
289
290 def enter(self):
291 self.buf = self.initial_buf
292
293 def exit(self):
294 self.result = _resolve_atom(self.buf)
295
296 def process(self, char):
297 """
298 State transitions:
299
300 - WHITESPACE - Idle
301 - ; - Comment
302 """
303
304 if char in WHITESPACE:
305 return Idle
306
307 if char == ";":
308 return Comment
309
310 self.buf += char
311
312
313 def AtomStartingWith(initial_char):
314 def AtomFactory(machine):
315 state = Atom(machine)
316 state.initial_buf = initial_char
317 return state
318 return AtomFactory
319
320
321 class Idle(State):
322 """
323 Idle state. This is the first (and last) thing that we should
324 be in.
325 """
326
327 def process(self, char):
328 """
329 State transitions:
330
331 - ( - Expression
332 - [ - List
333 - { - Dict
334 - \" - String
335 - ; - Comment
336 - # - Hash
337 - (default) - Atom
338 """
339
340 if char == "(":
341 return Expression
342
343 if char == "[":
344 return List
345
346 if char == "{":
347 return Dict
348
349 if char == "\"":
350 return String
351
352 if char == ";":
353 return Comment
354
355 if char == "#":
356 return Hash
357
358 if char in WHITESPACE:
359 return
360
361 return AtomStartingWith(char)
362
363
364 class Comment(State):
365 """
366 Comment state.
367 """
368
369 def process(self, char):
370 """
371 State transitions:
372
373 - \n - Idle
374 - (default) - disregard.
375 """
376
377 if char == "\n":
378 return Idle
379
380
381 class Hash(State):
382 """
383 Hash state
384 """
385
386 def process(self, char):
387 """
388 State transitions:
389
390 - ! - Comment
391 """
392
393 if char == "!":
394 return Comment
395
396 raise LexException("Unknown char (Hash state): `%s'" % (char))
397
[end of hy/lex/states.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hy/lex/states.py b/hy/lex/states.py
--- a/hy/lex/states.py
+++ b/hy/lex/states.py
@@ -67,10 +67,11 @@
except ValueError:
pass
- try:
- return HyComplex(obj)
- except ValueError:
- pass
+ if obj != "j":
+ try:
+ return HyComplex(obj)
+ except ValueError:
+ pass
table = {
"true": "True",
|
{"golden_diff": "diff --git a/hy/lex/states.py b/hy/lex/states.py\n--- a/hy/lex/states.py\n+++ b/hy/lex/states.py\n@@ -67,10 +67,11 @@\n except ValueError:\n pass\n \n- try:\n- return HyComplex(obj)\n- except ValueError:\n- pass\n+ if obj != \"j\":\n+ try:\n+ return HyComplex(obj)\n+ except ValueError:\n+ pass\n \n table = {\n \"true\": \"True\",\n", "issue": "LIST-COMP breaks with certain variable names\nTry compiling:\n\n```\n(list-comp (, i j) (i [-1 0 1] j [-1 0 1]))\n```\n\nWith hy and you'll get some strange errors. If you replace \"i\" and \"j\" with \"x\" and \"y\" respectively, the same piece of code works as expected.\n\n", "before_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.integer import HyInteger\nfrom hy.models.lambdalist import HyLambdaListKeyword\nfrom hy.models.float import HyFloat\nfrom hy.models.complex import HyComplex\nfrom hy.models.symbol import HySymbol\nfrom hy.models.string import HyString\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.dict import HyDict\nfrom hy.models.list import HyList\n\nfrom hy.errors import HyError\n\nfrom abc import ABCMeta, abstractmethod\n\n\nWHITESPACE = [\" \", \"\\t\", \"\\n\", \"\\r\"]\n\n\nclass LexException(HyError):\n \"\"\"\n Error during the Lexing of a Hython expression.\n \"\"\"\n pass\n\n\ndef _resolve_atom(obj):\n \"\"\"\n Resolve a bare atom into one of the following (in order):\n\n - Integer\n - LambdaListKeyword\n - Float\n - Complex\n - Symbol\n \"\"\"\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n if obj.startswith(\"&\"):\n return HyLambdaListKeyword(obj)\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": \"False\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n if obj.startswith(\"*\") and obj.endswith(\"*\") and obj not in (\"*\", \"**\"):\n obj = obj[1:-1].upper()\n\n if \"-\" in obj and obj != \"-\":\n obj = obj.replace(\"-\", \"_\")\n\n return HySymbol(obj)\n\n\nclass State(object):\n \"\"\"\n Generic State model.\n \"\"\"\n\n __slots__ = (\"nodes\", \"machine\")\n __metaclass__ = ABCMeta\n\n def __init__(self, machine):\n self.machine = machine\n\n def _enter(self):\n \"\"\" Internal shim for running global ``enter`` code 
\"\"\"\n self.result = None\n self.nodes = []\n self.enter()\n\n def _exit(self):\n \"\"\" Internal shim for running global ``exit`` code \"\"\"\n self.exit()\n\n def enter(self):\n \"\"\"\n Overridable ``enter`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n def exit(self):\n \"\"\"\n Overridable ``exit`` routines. Subclasses may implement this.\n \"\"\"\n pass\n\n @abstractmethod\n def process(self, char):\n \"\"\"\n Overridable ``process`` routines. Subclasses must implement this to be\n useful.\n \"\"\"\n pass # ABC\n\n\nclass ListeyThing(State):\n\n def enter(self):\n self.buf = \"\"\n\n def commit(self):\n if self.buf != \"\":\n ret = _resolve_atom(self.buf)\n ret.start_line = self._start_line\n ret.start_column = self._start_column\n ret.end_line = self.machine.line\n ret.end_column = (self.machine.column - 1)\n\n self.nodes.append(ret)\n self.buf = \"\"\n\n def exit(self):\n self.commit()\n self.result = self.result_type(self.nodes)\n\n def process(self, char):\n if char == \"(\":\n self.commit()\n self.machine.sub(Expression)\n return\n\n if char == \"{\":\n self.commit()\n self.machine.sub(Dict)\n return\n\n if char == \"[\":\n self.commit()\n self.machine.sub(List)\n return\n\n if char == \"\\\"\":\n self.commit()\n self.machine.sub(String)\n return\n\n if char == \";\":\n self.commit()\n self.machine.sub(Comment)\n return\n\n if char == self.end_char:\n return Idle\n\n if char in \")]}\":\n raise LexException(\"Unexpected closing character: `%s'\" % (char))\n\n if char in WHITESPACE:\n self.commit()\n return\n\n if self.buf == \"\":\n self._start_line = self.machine.line\n self._start_column = self.machine.column\n\n self.buf += char\n\n\nclass List(ListeyThing):\n \"\"\"\n This state parses a Hy list (like a Clojure vector) for use in native\n Python interop.\n\n [foo 1 2 3 4] is a good example.\n \"\"\"\n\n result_type = HyList\n end_char = \"]\"\n\n\nclass Expression(ListeyThing):\n \"\"\"\n This state parses a Hy expression (statement, to be evaluated at runtime)\n for running things & stuff.\n \"\"\"\n\n result_type = HyExpression\n end_char = \")\"\n\n\nclass Dict(ListeyThing):\n \"\"\"\n This state parses a Hy dict for things.\n \"\"\"\n\n def exit(self):\n self.commit()\n it = iter(self.nodes)\n result = dict(zip(it, it))\n self.result = HyDict(result)\n\n end_char = \"}\"\n\n\nclass String(State):\n \"\"\"\n String state. 
This will handle stuff like:\n\n (println \"foobar\")\n ^^^^^^^^ -- String\n \"\"\"\n\n def enter(self):\n self.escaped = False\n\n def exit(self):\n self.result = HyString(\"\".join(self.nodes))\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \" - Idle\n \"\"\"\n if self.escaped:\n self.escaped = False\n if char == \"n\":\n self.nodes.append(\"\\n\")\n return\n if char == \"\\\\\":\n self.nodes.append(\"\\\\\")\n return\n if char == \"\\\"\":\n self.nodes.append(\"\\\"\")\n return\n\n raise LexException(\"Unknown modifier: `%s'\" % (char))\n\n if char == \"\\\"\":\n return Idle\n\n if char == \"\\\\\":\n self.escaped = True\n return\n\n self.nodes.append(char)\n\n\nclass Atom(State):\n \"\"\"\n This state parses integer constants, boolean constants, and symbols\n \"\"\"\n\n def __init__(self, machine):\n State.__init__(self, machine)\n self.initial_buf = ''\n\n def enter(self):\n self.buf = self.initial_buf\n\n def exit(self):\n self.result = _resolve_atom(self.buf)\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - WHITESPACE - Idle\n - ; - Comment\n \"\"\"\n\n if char in WHITESPACE:\n return Idle\n\n if char == \";\":\n return Comment\n\n self.buf += char\n\n\ndef AtomStartingWith(initial_char):\n def AtomFactory(machine):\n state = Atom(machine)\n state.initial_buf = initial_char\n return state\n return AtomFactory\n\n\nclass Idle(State):\n \"\"\"\n Idle state. This is the first (and last) thing that we should\n be in.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ( - Expression\n - [ - List\n - { - Dict\n - \\\" - String\n - ; - Comment\n - # - Hash\n - (default) - Atom\n \"\"\"\n\n if char == \"(\":\n return Expression\n\n if char == \"[\":\n return List\n\n if char == \"{\":\n return Dict\n\n if char == \"\\\"\":\n return String\n\n if char == \";\":\n return Comment\n\n if char == \"#\":\n return Hash\n\n if char in WHITESPACE:\n return\n\n return AtomStartingWith(char)\n\n\nclass Comment(State):\n \"\"\"\n Comment state.\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - \\n - Idle\n - (default) - disregard.\n \"\"\"\n\n if char == \"\\n\":\n return Idle\n\n\nclass Hash(State):\n \"\"\"\n Hash state\n \"\"\"\n\n def process(self, char):\n \"\"\"\n State transitions:\n\n - ! - Comment\n \"\"\"\n\n if char == \"!\":\n return Comment\n\n raise LexException(\"Unknown char (Hash state): `%s'\" % (char))\n", "path": "hy/lex/states.py"}]}
| 3,697 | 120 |
gh_patches_debug_41801
|
rasdani/github-patches
|
git_diff
|
pydantic__pydantic-1620
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add encoding to `read_env_file()`
# Feature Request
Output of `import pydantic.utils; print(pydantic.utils.version_info())`:
```
pydantic version: 1.5.1
pydantic compiled: True
python version: 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]
platform: Windows-7-6.1.7601-SP1
optional deps. installed: ['email-validator']
```
## Description
Hi, there's known problem on Windows with parsing dotenv files - https://github.com/pypa/pipenv/issues/1963. `python-dotenv` would parse files with default encoding (cp1251 for Cyrillic Windows). As a result we get `Р›РёСЃС‚ 1` instead of `Лист 1`.
It looks like [this](https://github.com/samuelcolvin/pydantic/blob/960b24a5aab7ae0631bfbfbe0047b4d8600c6012/pydantic/env_settings.py#L111) function need to fetch encoding from `Config` class somehow.
<!-- Where possible please include a self-contained code snippet describing your feature request: -->
## Example
`.env` file (UTF-8):
```
foo=Лист 1
```
Code snippet:
```py
import pydantic
class Settings(pydantic.BaseSettings):
foo: str
class Config:
env_file_encoding = 'utf-8'
settings = Settings(_env_file='.env')
print(settings)
# foo='Лист 1'
```
</issue>
<code>
[start of pydantic/env_settings.py]
1 import os
2 import warnings
3 from pathlib import Path
4 from typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union
5
6 from .fields import ModelField
7 from .main import BaseModel, Extra
8 from .typing import display_as_type
9 from .utils import deep_update, sequence_like
10
11 env_file_sentinel = str(object())
12
13
14 class SettingsError(ValueError):
15 pass
16
17
18 class BaseSettings(BaseModel):
19 """
20 Base class for settings, allowing values to be overridden by environment variables.
21
22 This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),
23 Heroku and any 12 factor app design.
24 """
25
26 def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:
27 # Uses something other than `self` the first arg to allow "self" as a settable attribute
28 super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))
29
30 def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:
31 return deep_update(self._build_environ(_env_file), init_kwargs)
32
33 def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:
34 """
35 Build environment variables suitable for passing to the Model.
36 """
37 d: Dict[str, Optional[str]] = {}
38
39 if self.__config__.case_sensitive:
40 env_vars: Mapping[str, Optional[str]] = os.environ
41 else:
42 env_vars = {k.lower(): v for k, v in os.environ.items()}
43
44 env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file
45 if env_file is not None:
46 env_path = Path(env_file)
47 if env_path.is_file():
48 env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}
49
50 for field in self.__fields__.values():
51 env_val: Optional[str] = None
52 for env_name in field.field_info.extra['env_names']:
53 env_val = env_vars.get(env_name)
54 if env_val is not None:
55 break
56
57 if env_val is None:
58 continue
59
60 if field.is_complex():
61 try:
62 env_val = self.__config__.json_loads(env_val) # type: ignore
63 except ValueError as e:
64 raise SettingsError(f'error parsing JSON for "{env_name}"') from e
65 d[field.alias] = env_val
66 return d
67
68 class Config:
69 env_prefix = ''
70 env_file = None
71 validate_all = True
72 extra = Extra.forbid
73 arbitrary_types_allowed = True
74 case_sensitive = False
75
76 @classmethod
77 def prepare_field(cls, field: ModelField) -> None:
78 env_names: Union[List[str], AbstractSet[str]]
79 env = field.field_info.extra.get('env')
80 if env is None:
81 if field.has_alias:
82 warnings.warn(
83 'aliases are no longer used by BaseSettings to define which environment variables to read. '
84 'Instead use the "env" field setting. '
85 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',
86 FutureWarning,
87 )
88 env_names = {cls.env_prefix + field.name}
89 elif isinstance(env, str):
90 env_names = {env}
91 elif isinstance(env, (set, frozenset)):
92 env_names = env
93 elif sequence_like(env):
94 env_names = list(env)
95 else:
96 raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')
97
98 if not cls.case_sensitive:
99 env_names = env_names.__class__(n.lower() for n in env_names)
100 field.field_info.extra['env_names'] = env_names
101
102 __config__: Config # type: ignore
103
104
105 def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
106 try:
107 from dotenv import dotenv_values
108 except ImportError as e:
109 raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
110
111 file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)
112 if not case_sensitive:
113 return {k.lower(): v for k, v in file_vars.items()}
114 else:
115 return file_vars
116
[end of pydantic/env_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pydantic/env_settings.py b/pydantic/env_settings.py
--- a/pydantic/env_settings.py
+++ b/pydantic/env_settings.py
@@ -23,14 +23,28 @@
Heroku and any 12 factor app design.
"""
- def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:
+ def __init__(
+ __pydantic_self__,
+ _env_file: Union[Path, str, None] = env_file_sentinel,
+ _env_file_encoding: Optional[str] = None,
+ **values: Any,
+ ) -> None:
# Uses something other than `self` the first arg to allow "self" as a settable attribute
- super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))
-
- def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:
- return deep_update(self._build_environ(_env_file), init_kwargs)
-
- def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:
+ super().__init__(
+ **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)
+ )
+
+ def _build_values(
+ self,
+ init_kwargs: Dict[str, Any],
+ _env_file: Union[Path, str, None] = None,
+ _env_file_encoding: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)
+
+ def _build_environ(
+ self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None
+ ) -> Dict[str, Optional[str]]:
"""
Build environment variables suitable for passing to the Model.
"""
@@ -42,10 +56,16 @@
env_vars = {k.lower(): v for k, v in os.environ.items()}
env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file
+ env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding
if env_file is not None:
env_path = Path(env_file)
if env_path.is_file():
- env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}
+ env_vars = {
+ **read_env_file(
+ env_path, encoding=env_file_encoding, case_sensitive=self.__config__.case_sensitive
+ ),
+ **env_vars,
+ }
for field in self.__fields__.values():
env_val: Optional[str] = None
@@ -68,6 +88,7 @@
class Config:
env_prefix = ''
env_file = None
+ env_file_encoding = None
validate_all = True
extra = Extra.forbid
arbitrary_types_allowed = True
@@ -102,13 +123,13 @@
__config__: Config # type: ignore
-def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
+def read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
try:
from dotenv import dotenv_values
except ImportError as e:
raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
- file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)
+ file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)
if not case_sensitive:
return {k.lower(): v for k, v in file_vars.items()}
else:
|
{"golden_diff": "diff --git a/pydantic/env_settings.py b/pydantic/env_settings.py\n--- a/pydantic/env_settings.py\n+++ b/pydantic/env_settings.py\n@@ -23,14 +23,28 @@\n Heroku and any 12 factor app design.\n \"\"\"\n \n- def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:\n+ def __init__(\n+ __pydantic_self__,\n+ _env_file: Union[Path, str, None] = env_file_sentinel,\n+ _env_file_encoding: Optional[str] = None,\n+ **values: Any,\n+ ) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n- super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))\n-\n- def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:\n- return deep_update(self._build_environ(_env_file), init_kwargs)\n-\n- def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:\n+ super().__init__(\n+ **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)\n+ )\n+\n+ def _build_values(\n+ self,\n+ init_kwargs: Dict[str, Any],\n+ _env_file: Union[Path, str, None] = None,\n+ _env_file_encoding: Optional[str] = None,\n+ ) -> Dict[str, Any]:\n+ return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)\n+\n+ def _build_environ(\n+ self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None\n+ ) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n@@ -42,10 +56,16 @@\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n \n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n+ env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n- env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}\n+ env_vars = {\n+ **read_env_file(\n+ env_path, encoding=env_file_encoding, case_sensitive=self.__config__.case_sensitive\n+ ),\n+ **env_vars,\n+ }\n \n for field in self.__fields__.values():\n env_val: Optional[str] = None\n@@ -68,6 +88,7 @@\n class Config:\n env_prefix = ''\n env_file = None\n+ env_file_encoding = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n@@ -102,13 +123,13 @@\n __config__: Config # type: ignore\n \n \n-def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n+def read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n \n- file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)\n+ file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n", "issue": "Add encoding to `read_env_file()`\n# Feature Request\r\n\r\nOutput of `import pydantic.utils; print(pydantic.utils.version_info())`:\r\n\r\n```\r\n pydantic version: 1.5.1\r\n pydantic compiled: True\r\n python version: 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\r\n platform: Windows-7-6.1.7601-SP1\r\n optional deps. 
installed: ['email-validator']\r\n```\r\n\r\n## Description\r\n\r\nHi, there's known problem on Windows with parsing dotenv files - https://github.com/pypa/pipenv/issues/1963. `python-dotenv` would parse files with default encoding (cp1251 for Cyrillic Windows). As a result we get `\u0420\u203a\u0420\u0451\u0421\u0403\u0421\u201a 1` instead of `\u041b\u0438\u0441\u0442 1`.\r\n\r\nIt looks like [this](https://github.com/samuelcolvin/pydantic/blob/960b24a5aab7ae0631bfbfbe0047b4d8600c6012/pydantic/env_settings.py#L111) function need to fetch encoding from `Config` class somehow.\r\n\r\n<!-- Where possible please include a self-contained code snippet describing your feature request: -->\r\n\r\n## Example\r\n\r\n`.env` file (UTF-8):\r\n\r\n```\r\nfoo=\u041b\u0438\u0441\u0442 1\r\n```\r\n\r\nCode snippet:\r\n\r\n```py\r\nimport pydantic\r\n\r\nclass Settings(pydantic.BaseSettings):\r\n foo: str\r\n\r\n class Config:\r\n env_file_encoding = 'utf-8'\r\n\r\nsettings = Settings(_env_file='.env')\r\nprint(settings)\r\n# foo='\u041b\u0438\u0441\u0442 1'\r\n```\r\n\n", "before_files": [{"content": "import os\nimport warnings\nfrom pathlib import Path\nfrom typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union\n\nfrom .fields import ModelField\nfrom .main import BaseModel, Extra\nfrom .typing import display_as_type\nfrom .utils import deep_update, sequence_like\n\nenv_file_sentinel = str(object())\n\n\nclass SettingsError(ValueError):\n pass\n\n\nclass BaseSettings(BaseModel):\n \"\"\"\n Base class for settings, allowing values to be overridden by environment variables.\n\n This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),\n Heroku and any 12 factor app design.\n \"\"\"\n\n def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))\n\n def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:\n return deep_update(self._build_environ(_env_file), init_kwargs)\n\n def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n d: Dict[str, Optional[str]] = {}\n\n if self.__config__.case_sensitive:\n env_vars: Mapping[str, Optional[str]] = os.environ\n else:\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n\n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}\n\n for field in self.__fields__.values():\n env_val: Optional[str] = None\n for env_name in field.field_info.extra['env_names']:\n env_val = env_vars.get(env_name)\n if env_val is not None:\n break\n\n if env_val is None:\n continue\n\n if field.is_complex():\n try:\n env_val = self.__config__.json_loads(env_val) # type: ignore\n except ValueError as e:\n raise SettingsError(f'error parsing JSON for \"{env_name}\"') from e\n d[field.alias] = env_val\n return d\n\n class Config:\n env_prefix = ''\n env_file = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n case_sensitive = False\n\n @classmethod\n def prepare_field(cls, 
field: ModelField) -> None:\n env_names: Union[List[str], AbstractSet[str]]\n env = field.field_info.extra.get('env')\n if env is None:\n if field.has_alias:\n warnings.warn(\n 'aliases are no longer used by BaseSettings to define which environment variables to read. '\n 'Instead use the \"env\" field setting. '\n 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',\n FutureWarning,\n )\n env_names = {cls.env_prefix + field.name}\n elif isinstance(env, str):\n env_names = {env}\n elif isinstance(env, (set, frozenset)):\n env_names = env\n elif sequence_like(env):\n env_names = list(env)\n else:\n raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')\n\n if not cls.case_sensitive:\n env_names = env_names.__class__(n.lower() for n in env_names)\n field.field_info.extra['env_names'] = env_names\n\n __config__: Config # type: ignore\n\n\ndef read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n\n file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n return file_vars\n", "path": "pydantic/env_settings.py"}]}
| 2,191 | 926 |
gh_patches_debug_9183
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__models-492
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
model/text_classification: after training the model on the built-in corpus, inference fails with the following error
```text
I1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1
Traceback (most recent call last):
File "infer.py", line 91, in <module>
batch_size=10)
File "infer.py", line 58, in infer
for idx, item in enumerate(test_reader):
TypeError: 'function' object is not iterable
```
The paddle image used is the latest tag from https://hub.docker.com/r/paddlepaddle/paddle/tags/, and the models repo is the latest download.
</issue>
<code>
[start of text_classification/infer.py]
1 import sys
2 import os
3 import gzip
4
5 import paddle.v2 as paddle
6
7 import reader
8 from network_conf import fc_net, convolution_net
9 from utils import logger, load_dict, load_reverse_dict
10
11
12 def infer(topology, data_dir, model_path, word_dict_path, label_dict_path,
13 batch_size):
14 def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):
15 probs = inferer.infer(input=test_batch, field=["value"])
16 assert len(probs) == len(test_batch)
17 for word_ids, prob in zip(test_batch, probs):
18 word_text = " ".join([ids_2_word[id] for id in word_ids[0]])
19 print("%s\t%s\t%s" % (ids_2_label[prob.argmax()],
20 " ".join(["{:0.4f}".format(p)
21 for p in prob]), word_text))
22
23 logger.info("begin to predict...")
24 use_default_data = (data_dir is None)
25
26 if use_default_data:
27 word_dict = paddle.dataset.imdb.word_dict()
28 word_reverse_dict = dict((value, key)
29 for key, value in word_dict.iteritems())
30 label_reverse_dict = {0: "positive", 1: "negative"}
31 test_reader = paddle.dataset.imdb.test(word_dict)
32 else:
33 assert os.path.exists(
34 word_dict_path), "the word dictionary file does not exist"
35 assert os.path.exists(
36 label_dict_path), "the label dictionary file does not exist"
37
38 word_dict = load_dict(word_dict_path)
39 word_reverse_dict = load_reverse_dict(word_dict_path)
40 label_reverse_dict = load_reverse_dict(label_dict_path)
41
42 test_reader = reader.test_reader(data_dir, word_dict)()
43
44 dict_dim = len(word_dict)
45 class_num = len(label_reverse_dict)
46 prob_layer = topology(dict_dim, class_num, is_infer=True)
47
48 # initialize PaddlePaddle
49 paddle.init(use_gpu=False, trainer_count=1)
50
51 # load the trained models
52 parameters = paddle.parameters.Parameters.from_tar(
53 gzip.open(model_path, "r"))
54 inferer = paddle.inference.Inference(
55 output_layer=prob_layer, parameters=parameters)
56
57 test_batch = []
58 for idx, item in enumerate(test_reader):
59 test_batch.append([item[0]])
60 if len(test_batch) == batch_size:
61 _infer_a_batch(inferer, test_batch, word_reverse_dict,
62 label_reverse_dict)
63 test_batch = []
64
65 if len(test_batch):
66 _infer_a_batch(inferer, test_batch, word_reverse_dict,
67 label_reverse_dict)
68 test_batch = []
69
70
71 if __name__ == "__main__":
72 model_path = "models/dnn_params_pass_00000.tar.gz"
73 assert os.path.exists(model_path), "the trained model does not exist."
74
75 nn_type = "dnn"
76 test_dir = None
77 word_dict = None
78 label_dict = None
79
80 if nn_type == "dnn":
81 topology = fc_net
82 elif nn_type == "cnn":
83 topology = convolution_net
84
85 infer(
86 topology=topology,
87 data_dir=test_dir,
88 word_dict_path=word_dict,
89 label_dict_path=label_dict,
90 model_path=model_path,
91 batch_size=10)
92
[end of text_classification/infer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/text_classification/infer.py b/text_classification/infer.py
--- a/text_classification/infer.py
+++ b/text_classification/infer.py
@@ -28,7 +28,7 @@
word_reverse_dict = dict((value, key)
for key, value in word_dict.iteritems())
label_reverse_dict = {0: "positive", 1: "negative"}
- test_reader = paddle.dataset.imdb.test(word_dict)
+ test_reader = paddle.dataset.imdb.test(word_dict)()
else:
assert os.path.exists(
word_dict_path), "the word dictionary file does not exist"
|
{"golden_diff": "diff --git a/text_classification/infer.py b/text_classification/infer.py\n--- a/text_classification/infer.py\n+++ b/text_classification/infer.py\n@@ -28,7 +28,7 @@\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n- test_reader = paddle.dataset.imdb.test(word_dict)\n+ test_reader = paddle.dataset.imdb.test(word_dict)()\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n", "issue": "model/text_classification \u4f7f\u7528\u5185\u7f6e\u8bed\u6599\u96c6\u8bad\u7ec3\u6a21\u578b\u540e\uff0c\u9884\u6d4b\u65f6\u51fa\u9519\uff0c\u5177\u4f53\u5982\u4e0b\n```text\r\nI1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1 \r\nTraceback (most recent call last):\r\n File \"infer.py\", line 91, in <module>\r\n batch_size=10)\r\n File \"infer.py\", line 58, in infer\r\n for idx, item in enumerate(test_reader):\r\nTypeError: 'function' object is not iterable\r\n```\r\npaddle\u955c\u50cf\u7528\u7684\u662fhttps://hub.docker.com/r/paddlepaddle/paddle/tags/ latest\u7684\uff0cmodel\u4e0b\u8f7d\u7684\u6700\u65b0\u7684\n", "before_files": [{"content": "import sys\nimport os\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom network_conf import fc_net, convolution_net\nfrom utils import logger, load_dict, load_reverse_dict\n\n\ndef infer(topology, data_dir, model_path, word_dict_path, label_dict_path,\n batch_size):\n def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):\n probs = inferer.infer(input=test_batch, field=[\"value\"])\n assert len(probs) == len(test_batch)\n for word_ids, prob in zip(test_batch, probs):\n word_text = \" \".join([ids_2_word[id] for id in word_ids[0]])\n print(\"%s\\t%s\\t%s\" % (ids_2_label[prob.argmax()],\n \" \".join([\"{:0.4f}\".format(p)\n for p in prob]), word_text))\n\n logger.info(\"begin to predict...\")\n use_default_data = (data_dir is None)\n\n if use_default_data:\n word_dict = paddle.dataset.imdb.word_dict()\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n test_reader = paddle.dataset.imdb.test(word_dict)\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n assert os.path.exists(\n label_dict_path), \"the label dictionary file does not exist\"\n\n word_dict = load_dict(word_dict_path)\n word_reverse_dict = load_reverse_dict(word_dict_path)\n label_reverse_dict = load_reverse_dict(label_dict_path)\n\n test_reader = reader.test_reader(data_dir, word_dict)()\n\n dict_dim = len(word_dict)\n class_num = len(label_reverse_dict)\n prob_layer = topology(dict_dim, class_num, is_infer=True)\n\n # initialize PaddlePaddle\n paddle.init(use_gpu=False, trainer_count=1)\n\n # load the trained models\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(model_path, \"r\"))\n inferer = paddle.inference.Inference(\n output_layer=prob_layer, parameters=parameters)\n\n test_batch = []\n for idx, item in enumerate(test_reader):\n test_batch.append([item[0]])\n if len(test_batch) == batch_size:\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n if len(test_batch):\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n\nif __name__ == \"__main__\":\n model_path = \"models/dnn_params_pass_00000.tar.gz\"\n assert os.path.exists(model_path), \"the 
trained model does not exist.\"\n\n nn_type = \"dnn\"\n test_dir = None\n word_dict = None\n label_dict = None\n\n if nn_type == \"dnn\":\n topology = fc_net\n elif nn_type == \"cnn\":\n topology = convolution_net\n\n infer(\n topology=topology,\n data_dir=test_dir,\n word_dict_path=word_dict,\n label_dict_path=label_dict,\n model_path=model_path,\n batch_size=10)\n", "path": "text_classification/infer.py"}]}
| 1,590 | 132 |
gh_patches_debug_55295
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-799
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docker Python no longer working as soon as elastic-apm is installed
**Describe the bug**
It seems like some patches to requests destroy the functionality of the docker python package.
ValueError: invalid literal for int() with base 10: 'None'
```
Traceback (most recent call last):
File "project\venv\lib\site-packages\django\core\handlers\exception.py", line 34, in inner
response = get_response(request)
File "project\venv\lib\site-packages\django\core\handlers\base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "project\venv\lib\site-packages\django\core\handlers\base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "project\tool\src\company_tool\web\api\decorators.py", line 21, in wrapper_view_func
return view_func(request, json_data=data, *args, **kwargs)
File "project\venv\lib\site-packages\django\views\decorators\csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "project\venv\lib\site-packages\django\views\decorators\http.py", line 40, in inner
return func(request, *args, **kwargs)
File "project\tool\src\company_tool\web\api\views.py", line 27, in start
container_status = client.containers.run(image=container, command=arguments, detach=True, stdin_open=True, tty=True)
File "project\venv\lib\site-packages\docker\models\containers.py", line 802, in run
container = self.create(image=image, command=command,
File "project\venv\lib\site-packages\docker\models\containers.py", line 861, in create
resp = self.client.api.create_container(**create_kwargs)
File "project\venv\lib\site-packages\docker\api\container.py", line 430, in create_container
return self.create_container_from_config(config, name)
File "project\venv\lib\site-packages\docker\api\container.py", line 440, in create_container_from_config
res = self._post_json(u, data=config, params=params)
File "project\venv\lib\site-packages\docker\api\client.py", line 289, in _post_json
return self._post(url, data=json.dumps(data2), **kwargs)
File "project\venv\lib\site-packages\docker\utils\decorators.py", line 46, in inner
return f(self, *args, **kwargs)
File "project\venv\lib\site-packages\docker\api\client.py", line 226, in _post
return self.post(url, **self._set_request_timeout(kwargs))
File "project\venv\lib\site-packages\requests\sessions.py", line 578, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "project\venv\lib\site-packages\requests\sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "project\venv\lib\site-packages\elasticapm\utils\wrapt\wrappers.py", line 561, in __call__
return self._self_wrapper(self.__wrapped__, self._self_instance,
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\requests.py", line 59, in call
return wrapped(*args, **kwargs)
File "project\venv\lib\site-packages\requests\sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "project\venv\lib\site-packages\requests\adapters.py", line 439, in send
resp = conn.urlopen(
File "project\venv\lib\site-packages\elasticapm\utils\wrapt\wrappers.py", line 561, in __call__
return self._self_wrapper(self.__wrapped__, self._self_instance,
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "project\venv\lib\site-packages\elasticapm\instrumentation\packages\urllib3.py", line 74, in call
destination = url_to_destination(url)
File "project\venv\lib\site-packages\elasticapm\utils\__init__.py", line 146, in url_to_destination
port = parts.port
File "C:\Python38\lib\urllib\parse.py", line 174, in port
raise ValueError(message) from None
ValueError: Port could not be cast to integer value as 'None'
```
**To Reproduce**
Steps to reproduce the behavior:
1. Install "docker" and "elastic-apm" python package within a django app
2. Load APM using default django config
2. Try to use docker
**Expected behavior**
Docker still works
</issue>
<code>
[start of elasticapm/utils/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31 import base64
32 import os
33 import re
34 from functools import partial
35
36 from elasticapm.conf import constants
37 from elasticapm.utils import compat, encoding
38
39 try:
40 from functools import partialmethod
41
42 partial_types = (partial, partialmethod)
43 except ImportError:
44 # Python 2
45 partial_types = (partial,)
46
47
48 default_ports = {"https": 443, "http": 80, "postgresql": 5432, "mysql": 3306, "mssql": 1433}
49
50
51 def varmap(func, var, context=None, name=None):
52 """
53 Executes ``func(key_name, value)`` on all values,
54 recursively discovering dict and list scoped
55 values.
56 """
57 if context is None:
58 context = set()
59 objid = id(var)
60 if objid in context:
61 return func(name, "<...>")
62 context.add(objid)
63 if isinstance(var, dict):
64 ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))
65 elif isinstance(var, (list, tuple)):
66 ret = func(name, [varmap(func, f, context, name) for f in var])
67 else:
68 ret = func(name, var)
69 context.remove(objid)
70 return ret
71
72
73 def get_name_from_func(func):
74 # partials don't have `__module__` or `__name__`, so we use the values from the "inner" function
75 if isinstance(func, partial_types):
76 return "partial({})".format(get_name_from_func(func.func))
77 elif hasattr(func, "_partialmethod") and hasattr(func._partialmethod, "func"):
78 return "partial({})".format(get_name_from_func(func._partialmethod.func))
79
80 module = func.__module__
81
82 if hasattr(func, "__name__"):
83 view_name = func.__name__
84 else: # Fall back if there's no __name__
85 view_name = func.__class__.__name__
86
87 return "{0}.{1}".format(module, view_name)
88
89
90 def build_name_with_http_method_prefix(name, request):
91 return " ".join((request.method, name)) if name else name
92
93
94 def is_master_process():
95 # currently only recognizes uwsgi master process
96 try:
97 import uwsgi
98
99 return os.getpid() == uwsgi.masterpid()
100 except ImportError:
101 return False
102
103
104 def get_url_dict(url):
105 parse_result = compat.urlparse.urlparse(url)
106
107 url_dict = {
108 "full": encoding.keyword_field(url),
109 "protocol": parse_result.scheme + ":",
110 "hostname": encoding.keyword_field(parse_result.hostname),
111 "pathname": encoding.keyword_field(parse_result.path),
112 }
113
114 port = None if parse_result.port is None else str(parse_result.port)
115
116 if port:
117 url_dict["port"] = port
118 if parse_result.query:
119 url_dict["search"] = encoding.keyword_field("?" + parse_result.query)
120 return url_dict
121
122
123 def sanitize_url(url):
124 if "@" not in url:
125 return url
126 parts = compat.urlparse.urlparse(url)
127 return url.replace("%s:%s" % (parts.username, parts.password), "%s:%s" % (parts.username, constants.MASK))
128
129
130 def get_host_from_url(url):
131 parsed_url = compat.urlparse.urlparse(url)
132 host = parsed_url.hostname or " "
133
134 if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:
135 host += ":" + str(parsed_url.port)
136
137 return host
138
139
140 def url_to_destination(url, service_type="external"):
141 parts = compat.urlparse.urlsplit(url)
142 hostname = parts.hostname
143 # preserve brackets for IPv6 URLs
144 if "://[" in url:
145 hostname = "[%s]" % hostname
146 port = parts.port
147 default_port = default_ports.get(parts.scheme, None)
148 name = "%s://%s" % (parts.scheme, hostname)
149 resource = hostname
150 if not port and parts.scheme in default_ports:
151 port = default_ports[parts.scheme]
152 if port:
153 if port != default_port:
154 name += ":%d" % port
155 resource += ":%d" % port
156 return {"service": {"name": name, "resource": resource, "type": service_type}}
157
158
159 def read_pem_file(file_obj):
160 cert = b""
161 for line in file_obj:
162 if line.startswith(b"-----BEGIN CERTIFICATE-----"):
163 break
164 for line in file_obj:
165 if not line.startswith(b"-----END CERTIFICATE-----"):
166 cert += line.strip()
167 return base64.b64decode(cert)
168
169
170 def starmatch_to_regex(pattern):
171 i, n = 0, len(pattern)
172 res = []
173 while i < n:
174 c = pattern[i]
175 i = i + 1
176 if c == "*":
177 res.append(".*")
178 else:
179 res.append(re.escape(c))
180 return re.compile(r"(?:%s)\Z" % "".join(res), re.IGNORECASE | re.DOTALL)
181
[end of elasticapm/utils/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py
--- a/elasticapm/utils/__init__.py
+++ b/elasticapm/utils/__init__.py
@@ -143,7 +143,11 @@
# preserve brackets for IPv6 URLs
if "://[" in url:
hostname = "[%s]" % hostname
- port = parts.port
+ try:
+ port = parts.port
+ except ValueError:
+ # Malformed port, just use None rather than raising an exception
+ port = None
default_port = default_ports.get(parts.scheme, None)
name = "%s://%s" % (parts.scheme, hostname)
resource = hostname
|
{"golden_diff": "diff --git a/elasticapm/utils/__init__.py b/elasticapm/utils/__init__.py\n--- a/elasticapm/utils/__init__.py\n+++ b/elasticapm/utils/__init__.py\n@@ -143,7 +143,11 @@\n # preserve brackets for IPv6 URLs\n if \"://[\" in url:\n hostname = \"[%s]\" % hostname\n- port = parts.port\n+ try:\n+ port = parts.port\n+ except ValueError:\n+ # Malformed port, just use None rather than raising an exception\n+ port = None\n default_port = default_ports.get(parts.scheme, None)\n name = \"%s://%s\" % (parts.scheme, hostname)\n resource = hostname\n", "issue": "Docker Python no longer working as soon as elastic-apm is installed\n**Describe the bug**\r\nIt seems like some patches to requests destroy the functionality of the docker python package.\r\n\r\nValueError: invalid literal for int() with base 10: 'None'\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"project\\venv\\lib\\site-packages\\django\\core\\handlers\\exception.py\", line 34, in inner\r\n response = get_response(request)\r\n File \"project\\venv\\lib\\site-packages\\django\\core\\handlers\\base.py\", line 115, in _get_response\r\n response = self.process_exception_by_middleware(e, request)\r\n File \"project\\venv\\lib\\site-packages\\django\\core\\handlers\\base.py\", line 113, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"project\\tool\\src\\company_tool\\web\\api\\decorators.py\", line 21, in wrapper_view_func\r\n return view_func(request, json_data=data, *args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\django\\views\\decorators\\csrf.py\", line 54, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\django\\views\\decorators\\http.py\", line 40, in inner\r\n return func(request, *args, **kwargs)\r\n File \"project\\tool\\src\\company_tool\\web\\api\\views.py\", line 27, in start\r\n container_status = client.containers.run(image=container, command=arguments, detach=True, stdin_open=True, tty=True)\r\n File \"project\\venv\\lib\\site-packages\\docker\\models\\containers.py\", line 802, in run\r\n container = self.create(image=image, command=command,\r\n File \"project\\venv\\lib\\site-packages\\docker\\models\\containers.py\", line 861, in create\r\n resp = self.client.api.create_container(**create_kwargs)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\container.py\", line 430, in create_container\r\n return self.create_container_from_config(config, name)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\container.py\", line 440, in create_container_from_config\r\n res = self._post_json(u, data=config, params=params)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\client.py\", line 289, in _post_json\r\n return self._post(url, data=json.dumps(data2), **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\docker\\utils\\decorators.py\", line 46, in inner\r\n return f(self, *args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\docker\\api\\client.py\", line 226, in _post\r\n return self.post(url, **self._set_request_timeout(kwargs))\r\n File \"project\\venv\\lib\\site-packages\\requests\\sessions.py\", line 578, in post\r\n return self.request('POST', url, data=data, json=json, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\requests\\sessions.py\", line 530, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\utils\\wrapt\\wrappers.py\", line 561, in __call__\r\n return 
self._self_wrapper(self.__wrapped__, self._self_instance,\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\requests.py\", line 59, in call\r\n return wrapped(*args, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\requests\\sessions.py\", line 643, in send\r\n r = adapter.send(request, **kwargs)\r\n File \"project\\venv\\lib\\site-packages\\requests\\adapters.py\", line 439, in send\r\n resp = conn.urlopen(\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\utils\\wrapt\\wrappers.py\", line 561, in __call__\r\n return self._self_wrapper(self.__wrapped__, self._self_instance,\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\instrumentation\\packages\\urllib3.py\", line 74, in call\r\n destination = url_to_destination(url)\r\n File \"project\\venv\\lib\\site-packages\\elasticapm\\utils\\__init__.py\", line 146, in url_to_destination\r\n port = parts.port\r\n File \"C:\\Python38\\lib\\urllib\\parse.py\", line 174, in port\r\n raise ValueError(message) from None\r\nValueError: Port could not be cast to integer value as 'None'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install \"docker\" and \"elastic-apm\" python package within a django app\r\n2. Load APM using default django config\r\n2. Try to use docker \r\n\r\n**Expected behavior**\r\nDocker still works\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nimport base64\nimport os\nimport re\nfrom functools import partial\n\nfrom elasticapm.conf import constants\nfrom elasticapm.utils import compat, encoding\n\ntry:\n from functools import partialmethod\n\n partial_types = (partial, partialmethod)\nexcept ImportError:\n # Python 2\n partial_types = (partial,)\n\n\ndefault_ports = {\"https\": 443, \"http\": 80, \"postgresql\": 5432, \"mysql\": 3306, \"mssql\": 1433}\n\n\ndef varmap(func, var, context=None, name=None):\n \"\"\"\n Executes ``func(key_name, value)`` on all values,\n recursively discovering dict and list scoped\n values.\n \"\"\"\n if context is None:\n context = set()\n objid = id(var)\n if objid in context:\n return func(name, \"<...>\")\n context.add(objid)\n if isinstance(var, dict):\n ret = func(name, dict((k, varmap(func, v, context, k)) for k, v in compat.iteritems(var)))\n elif isinstance(var, (list, tuple)):\n ret = func(name, [varmap(func, f, context, name) for f in var])\n else:\n ret = func(name, var)\n context.remove(objid)\n return ret\n\n\ndef get_name_from_func(func):\n # partials don't have `__module__` or `__name__`, so we use the values from the \"inner\" function\n if isinstance(func, partial_types):\n return \"partial({})\".format(get_name_from_func(func.func))\n elif hasattr(func, \"_partialmethod\") and hasattr(func._partialmethod, \"func\"):\n return \"partial({})\".format(get_name_from_func(func._partialmethod.func))\n\n module = func.__module__\n\n if hasattr(func, \"__name__\"):\n view_name = func.__name__\n else: # Fall back if there's no __name__\n view_name = func.__class__.__name__\n\n return \"{0}.{1}\".format(module, view_name)\n\n\ndef build_name_with_http_method_prefix(name, request):\n return \" \".join((request.method, name)) if name else name\n\n\ndef is_master_process():\n # currently only recognizes uwsgi master process\n try:\n import uwsgi\n\n return os.getpid() == uwsgi.masterpid()\n except ImportError:\n return False\n\n\ndef get_url_dict(url):\n parse_result = compat.urlparse.urlparse(url)\n\n url_dict = {\n \"full\": encoding.keyword_field(url),\n \"protocol\": parse_result.scheme + \":\",\n \"hostname\": encoding.keyword_field(parse_result.hostname),\n \"pathname\": encoding.keyword_field(parse_result.path),\n }\n\n port = None if parse_result.port is None else str(parse_result.port)\n\n if port:\n url_dict[\"port\"] = port\n if parse_result.query:\n url_dict[\"search\"] = encoding.keyword_field(\"?\" + parse_result.query)\n return url_dict\n\n\ndef sanitize_url(url):\n if \"@\" not in url:\n return url\n parts = compat.urlparse.urlparse(url)\n return url.replace(\"%s:%s\" % (parts.username, parts.password), \"%s:%s\" % (parts.username, constants.MASK))\n\n\ndef get_host_from_url(url):\n parsed_url = compat.urlparse.urlparse(url)\n host = parsed_url.hostname or \" \"\n\n if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:\n host += \":\" + str(parsed_url.port)\n\n return host\n\n\ndef url_to_destination(url, service_type=\"external\"):\n parts = compat.urlparse.urlsplit(url)\n hostname = 
parts.hostname\n # preserve brackets for IPv6 URLs\n if \"://[\" in url:\n hostname = \"[%s]\" % hostname\n port = parts.port\n default_port = default_ports.get(parts.scheme, None)\n name = \"%s://%s\" % (parts.scheme, hostname)\n resource = hostname\n if not port and parts.scheme in default_ports:\n port = default_ports[parts.scheme]\n if port:\n if port != default_port:\n name += \":%d\" % port\n resource += \":%d\" % port\n return {\"service\": {\"name\": name, \"resource\": resource, \"type\": service_type}}\n\n\ndef read_pem_file(file_obj):\n cert = b\"\"\n for line in file_obj:\n if line.startswith(b\"-----BEGIN CERTIFICATE-----\"):\n break\n for line in file_obj:\n if not line.startswith(b\"-----END CERTIFICATE-----\"):\n cert += line.strip()\n return base64.b64decode(cert)\n\n\ndef starmatch_to_regex(pattern):\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), re.IGNORECASE | re.DOTALL)\n", "path": "elasticapm/utils/__init__.py"}]}
| 3,688 | 166 |
gh_patches_debug_30819
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-2322
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use `show objects in schema` on Snowflake
### Describe the feature
dbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).
Caveats:
- Snowflake's docs indicate that only 10k records can be returned from one of these queries. I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.
- The existing info schema query gets away with a case-insensitive lookup using `ilike, but we'll need to quote and case the schema name exactly for this query to succeed.
- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.
- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue
</issue>
<code>
[start of plugins/snowflake/dbt/adapters/snowflake/impl.py]
1 from typing import Mapping, Any, Optional, List
2
3 import agate
4
5 from dbt.adapters.sql import SQLAdapter
6 from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
7 from dbt.adapters.snowflake import SnowflakeConnectionManager
8 from dbt.adapters.snowflake import SnowflakeRelation
9 from dbt.adapters.snowflake import SnowflakeColumn
10 from dbt.contracts.graph.manifest import Manifest
11 from dbt.exceptions import RuntimeException, DatabaseException
12 from dbt.utils import filter_null_values
13
14
15 class SnowflakeAdapter(SQLAdapter):
16 Relation = SnowflakeRelation
17 Column = SnowflakeColumn
18 ConnectionManager = SnowflakeConnectionManager
19
20 AdapterSpecificConfigs = frozenset(
21 {"transient", "cluster_by", "automatic_clustering", "secure",
22 "copy_grants", "snowflake_warehouse"}
23 )
24
25 @classmethod
26 def date_function(cls):
27 return "CURRENT_TIMESTAMP()"
28
29 @classmethod
30 def _catalog_filter_table(
31 cls, table: agate.Table, manifest: Manifest
32 ) -> agate.Table:
33 # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force
34 # the column names to their lowercased forms.
35 lowered = table.rename(
36 column_names=[c.lower() for c in table.column_names]
37 )
38 return super()._catalog_filter_table(lowered, manifest)
39
40 def _make_match_kwargs(self, database, schema, identifier):
41 quoting = self.config.quoting
42 if identifier is not None and quoting["identifier"] is False:
43 identifier = identifier.upper()
44
45 if schema is not None and quoting["schema"] is False:
46 schema = schema.upper()
47
48 if database is not None and quoting["database"] is False:
49 database = database.upper()
50
51 return filter_null_values(
52 {"identifier": identifier, "schema": schema, "database": database}
53 )
54
55 def _get_warehouse(self) -> str:
56 _, table = self.execute(
57 'select current_warehouse() as warehouse',
58 fetch=True
59 )
60 if len(table) == 0 or len(table[0]) == 0:
61 # can this happen?
62 raise RuntimeException(
63 'Could not get current warehouse: no results'
64 )
65 return str(table[0][0])
66
67 def _use_warehouse(self, warehouse: str):
68 """Use the given warehouse. Quotes are never applied."""
69 self.execute('use warehouse {}'.format(warehouse))
70
71 def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:
72 default_warehouse = self.config.credentials.warehouse
73 warehouse = config.get('snowflake_warehouse', default_warehouse)
74 if warehouse == default_warehouse or warehouse is None:
75 return None
76 previous = self._get_warehouse()
77 self._use_warehouse(warehouse)
78 return previous
79
80 def post_model_hook(
81 self, config: Mapping[str, Any], context: Optional[str]
82 ) -> None:
83 if context is not None:
84 self._use_warehouse(context)
85
86 def list_schemas(self, database: str) -> List[str]:
87 try:
88 results = self.execute_macro(
89 LIST_SCHEMAS_MACRO_NAME,
90 kwargs={'database': database}
91 )
92 except DatabaseException as exc:
93 msg = (
94 f'Database error while listing schemas in database '
95 f'"{database}"\n{exc}'
96 )
97 raise RuntimeException(msg)
98 # this uses 'show terse schemas in database', and the column name we
99 # want is 'name'
100
101 return [row['name'] for row in results]
102
[end of plugins/snowflake/dbt/adapters/snowflake/impl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py
--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py
@@ -3,7 +3,10 @@
import agate
from dbt.adapters.sql import SQLAdapter
-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
+from dbt.adapters.sql.impl import (
+ LIST_SCHEMAS_MACRO_NAME,
+ LIST_RELATIONS_MACRO_NAME,
+)
from dbt.adapters.snowflake import SnowflakeConnectionManager
from dbt.adapters.snowflake import SnowflakeRelation
from dbt.adapters.snowflake import SnowflakeColumn
@@ -99,3 +102,43 @@
# want is 'name'
return [row['name'] for row in results]
+
+ def list_relations_without_caching(
+ self, information_schema, schema
+ ) -> List[SnowflakeRelation]:
+ kwargs = {'information_schema': information_schema, 'schema': schema}
+ try:
+ results = self.execute_macro(
+ LIST_RELATIONS_MACRO_NAME,
+ kwargs=kwargs
+ )
+ except DatabaseException as exc:
+ # if the schema doesn't exist, we just want to return.
+ # Alternatively, we could query the list of schemas before we start
+ # and skip listing the missing ones, which sounds expensive.
+ if 'Object does not exist' in str(exc):
+ return []
+ raise
+
+ relations = []
+ quote_policy = {
+ 'database': True,
+ 'schema': True,
+ 'identifier': True
+ }
+
+ columns = ['database_name', 'schema_name', 'name', 'kind']
+ for _database, _schema, _identifier, _type in results.select(columns):
+ try:
+ _type = self.Relation.get_relation_type(_type.lower())
+ except ValueError:
+ _type = self.Relation.External
+ relations.append(self.Relation.create(
+ database=_database,
+ schema=_schema,
+ identifier=_identifier,
+ quote_policy=quote_policy,
+ type=_type
+ ))
+
+ return relations
|
{"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n@@ -3,7 +3,10 @@\n import agate\n \n from dbt.adapters.sql import SQLAdapter\n-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\n+from dbt.adapters.sql.impl import (\n+ LIST_SCHEMAS_MACRO_NAME,\n+ LIST_RELATIONS_MACRO_NAME,\n+)\n from dbt.adapters.snowflake import SnowflakeConnectionManager\n from dbt.adapters.snowflake import SnowflakeRelation\n from dbt.adapters.snowflake import SnowflakeColumn\n@@ -99,3 +102,43 @@\n # want is 'name'\n \n return [row['name'] for row in results]\n+\n+ def list_relations_without_caching(\n+ self, information_schema, schema\n+ ) -> List[SnowflakeRelation]:\n+ kwargs = {'information_schema': information_schema, 'schema': schema}\n+ try:\n+ results = self.execute_macro(\n+ LIST_RELATIONS_MACRO_NAME,\n+ kwargs=kwargs\n+ )\n+ except DatabaseException as exc:\n+ # if the schema doesn't exist, we just want to return.\n+ # Alternatively, we could query the list of schemas before we start\n+ # and skip listing the missing ones, which sounds expensive.\n+ if 'Object does not exist' in str(exc):\n+ return []\n+ raise\n+\n+ relations = []\n+ quote_policy = {\n+ 'database': True,\n+ 'schema': True,\n+ 'identifier': True\n+ }\n+\n+ columns = ['database_name', 'schema_name', 'name', 'kind']\n+ for _database, _schema, _identifier, _type in results.select(columns):\n+ try:\n+ _type = self.Relation.get_relation_type(_type.lower())\n+ except ValueError:\n+ _type = self.Relation.External\n+ relations.append(self.Relation.create(\n+ database=_database,\n+ schema=_schema,\n+ identifier=_identifier,\n+ quote_policy=quote_policy,\n+ type=_type\n+ ))\n+\n+ return relations\n", "issue": "Use `show objects in schema` on Snowflake\n### Describe the feature\r\ndbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).\r\n\r\nCaveats:\r\n - Snowflake's docs indicate that only 10k records can be returned from one of these queries. 
I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.\r\n - The existing info schema query gets away with a case-insensitive lookup using `ilike, but we'll need to quote and case the schema name exactly for this query to succeed.\r\n- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.\r\n- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue\n", "before_files": [{"content": "from typing import Mapping, Any, Optional, List\n\nimport agate\n\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\",\n \"copy_grants\", \"snowflake_warehouse\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. 
Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}]}
| 1,756 | 513 |
gh_patches_debug_32064
|
rasdani/github-patches
|
git_diff
|
apache__airflow-14274
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
upgrade_check fails db version check
**Apache Airflow version**: 1.10.14 with AWS RDS mysql 5.7.26 as metastore db
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): v1.16.15
**Environment**: DEV
- **Cloud provider or hardware configuration**: AWS
- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)
- **Kernel** (e.g. `uname -a`): Linux airflow-scheduler-765f664c56-4bsfq 4.14.186-146.268.amzn2.x86_64 #1 SMP Tue Jul 14 18:16:52 UTC 2020 x86_64 GNU/Linux
- **Install tools**:
- **Others**: Running on K8S as docker container with apache/airflow:1.10.14 as base
**What happened**: Running `airflow upgrade_check` returns the following error:
```
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$ airflow upgrade_check
==================================================== STATUS ====================================================
Check for latest versions of apache-airflow and checker...............................................SUCCESS
Remove airflow.AirflowMacroPlugin class...............................................................SUCCESS
/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/helpers.py:442: DeprecationWarning: Importing 'DummyOperator' directly from 'airflow.operators' has been deprecated. Please import from 'airflow.operators.[operator_module]' instead. Support for direct imports will be dropped entirely in Airflow 2.0.
DeprecationWarning)
Ensure users are not using custom metaclasses in custom operators.....................................SUCCESS
Chain between DAG and operator not allowed............................................................SUCCESS
Connection.conn_type is not nullable..................................................................SUCCESS
Custom Executors now require full path................................................................SUCCESS
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 118, in run
all_problems = check_upgrade(formatter, rules)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 38, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py", line 56, in check
installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
File "/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py", line 298, in __init__
raise InvalidVersion("Invalid version: '{0}'".format(version))
packaging.version.InvalidVersion: Invalid version: '5.7.26-log'
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$
```
**What you expected to happen**: the command runs through and prints helpful messages\r\n\r\n
<!-- What do you think went wrong? -->
Running `session.execute('SELECT VERSION();').scalar()` against the metastore db returns "5.7.26-log", which is possibly not a valid value for the Version class `__init__` function because of the "-log" ending?\r\n\r\n
```
mysql> select VERSION();
+------------+
| VERSION() |
+------------+
| 5.7.26-log |
+------------+
1 row in set (0.00 sec)
```
**How to reproduce it**: Run `airflow upgrade_check` again.
**Anything else we need to know**:
Dockerfile snippet:
```
FROM apache/airflow:1.10.14
...
USER ${AIRFLOW_UID}
RUN pip install --user \
airflow-kubernetes-job-operator \
apache-airflow-backport-providers-cncf-kubernetes \
apache-airflow-backport-providers-ssh \
apache-airflow-upgrade-check
```
How often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.
</issue>
<code>
[start of airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 from packaging.version import Version
19
20 from airflow.configuration import conf
21 from airflow.upgrade.rules.base_rule import BaseRule
22 from airflow.utils.db import provide_session
23
24
25 class DatabaseVersionCheckRule(BaseRule):
26 title = "Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0"
27
28 description = """\
29 From Airflow 2.0, the following database versions are supported:
30 PostgreSQl - 9.6, 10, 11, 12, 13;
31 MySQL - 5.7, 8;
32 SQLite - 3.15+
33 """
34
35 @provide_session
36 def check(self, session=None):
37
38 more_info = "See link below for more details: https://github.com/apache/airflow#requirements"
39
40 conn_str = conf.get(section="core", key="sql_alchemy_conn")
41
42 if "sqlite" in conn_str:
43 min_req_sqlite_version = Version('3.15')
44 installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())
45 if installed_sqlite_version < min_req_sqlite_version:
46 return "From Airflow 2.0, SQLite version below 3.15 is no longer supported. \n" + more_info
47
48 elif "postgres" in conn_str:
49 min_req_postgres_version = Version('9.6')
50 installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())
51 if installed_postgres_version < min_req_postgres_version:
52 return "From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \n" + more_info
53
54 elif "mysql" in conn_str:
55 min_req_mysql_version = Version('5.7')
56 installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
57 if installed_mysql_version < min_req_mysql_version:
58 return "From Airflow 2.0, MySQL version below 5.7 is no longer supported. \n" + more_info
59
[end of airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
--- a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
+++ b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
@@ -43,16 +43,23 @@
min_req_sqlite_version = Version('3.15')
installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())
if installed_sqlite_version < min_req_sqlite_version:
- return "From Airflow 2.0, SQLite version below 3.15 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, SQLite version below {} is no longer supported. \n{}".format(
+ min_req_sqlite_version, more_info
+ )
elif "postgres" in conn_str:
min_req_postgres_version = Version('9.6')
installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())
if installed_postgres_version < min_req_postgres_version:
- return "From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, PostgreSQL version below {} is no longer supported. \n{}".format(
+ min_req_postgres_version, more_info
+ )
elif "mysql" in conn_str:
min_req_mysql_version = Version('5.7')
- installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
+ # special treatment is needed here, because MySQL version may include a suffix like '-log'
+ installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])
if installed_mysql_version < min_req_mysql_version:
- return "From Airflow 2.0, MySQL version below 5.7 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, MySQL version below {} is no longer supported. \n{}".format(
+ min_req_mysql_version, more_info
+ )
|
{"golden_diff": "diff --git a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n--- a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n+++ b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n@@ -43,16 +43,23 @@\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n- return \"From Airflow 2.0, SQLite version below 3.15 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, SQLite version below {} is no longer supported. \\n{}\".format(\n+ min_req_sqlite_version, more_info\n+ )\n \n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n- return \"From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, PostgreSQL version below {} is no longer supported. \\n{}\".format(\n+ min_req_postgres_version, more_info\n+ )\n \n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n- installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\n+ # special treatment is needed here, because MySQL version may include a suffix like '-log'\n+ installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])\n if installed_mysql_version < min_req_mysql_version:\n- return \"From Airflow 2.0, MySQL version below 5.7 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, MySQL version below {} is no longer supported. \\n{}\".format(\n+ min_req_mysql_version, more_info\n+ )\n", "issue": "upgrade_check fails db version check\n**Apache Airflow version**: 1.10.14 with AWS RDS mysql 5.7.26 as metastore db\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): v1.16.15\r\n\r\n**Environment**: DEV\r\n\r\n- **Cloud provider or hardware configuration**: AWS\r\n- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)\r\n- **Kernel** (e.g. `uname -a`): Linux airflow-scheduler-765f664c56-4bsfq 4.14.186-146.268.amzn2.x86_64 #1 SMP Tue Jul 14 18:16:52 UTC 2020 x86_64 GNU/Linux\r\n- **Install tools**: \r\n- **Others**: Running on K8S as docker container with apache/airflow:1.10.14 as base\r\n\r\n**What happened**: Running `airflow upgrade_check` returns the following error:\r\n```\r\nairflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$ airflow upgrade_check\r\n\r\n==================================================== STATUS ====================================================\r\nCheck for latest versions of apache-airflow and checker...............................................SUCCESS\r\nRemove airflow.AirflowMacroPlugin class...............................................................SUCCESS\r\n/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/helpers.py:442: DeprecationWarning: Importing 'DummyOperator' directly from 'airflow.operators' has been deprecated. Please import from 'airflow.operators.[operator_module]' instead. 
Support for direct imports will be dropped entirely in Airflow 2.0.\r\n DeprecationWarning)\r\nEnsure users are not using custom metaclasses in custom operators.....................................SUCCESS\r\nChain between DAG and operator not allowed............................................................SUCCESS\r\nConnection.conn_type is not nullable..................................................................SUCCESS\r\nCustom Executors now require full path................................................................SUCCESS\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py\", line 118, in run\r\n all_problems = check_upgrade(formatter, rules)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py\", line 38, in check_upgrade\r\n rule_status = RuleStatus.from_rule(rule)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py\", line 44, in from_rule\r\n result = rule.check()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py\", line 74, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\", line 56, in check\r\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py\", line 298, in __init__\r\n raise InvalidVersion(\"Invalid version: '{0}'\".format(version))\r\npackaging.version.InvalidVersion: Invalid version: '5.7.26-log'\r\nairflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$\r\n```\r\n\r\n**What you expected to happen**: commands runs through and prints helpful messages\r\n\r\n<!-- What do you think went wrong? -->\r\nRunning `SELECT VERSION();').scalar()` against the metastore db returns \"5.7.26-log' which is possibly not a valid value for Version class `__init__` function because of the \"-log\" ending?\r\n\r\n```\r\nmysql> select VERSION();\r\n+------------+\r\n| VERSION() |\r\n+------------+\r\n| 5.7.26-log |\r\n+------------+\r\n1 row in set (0.00 sec)\r\n```\r\n\r\n**How to reproduce it**: Run `airflow upgrade_check` again.\r\n\r\n**Anything else we need to know**: \r\nDockerfile snippet:\r\n```\r\nFROM apache/airflow:1.10.14\r\n...\r\nUSER ${AIRFLOW_UID}\r\nRUN pip install --user \\\r\n airflow-kubernetes-job-operator \\\r\n apache-airflow-backport-providers-cncf-kubernetes \\\r\n apache-airflow-backport-providers-ssh \\\r\n apache-airflow-upgrade-check\r\n```\r\n\r\nHow often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom packaging.version import Version\n\nfrom airflow.configuration import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.db import provide_session\n\n\nclass DatabaseVersionCheckRule(BaseRule):\n title = \"Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0\"\n\n description = \"\"\"\\\nFrom Airflow 2.0, the following database versions are supported:\nPostgreSQl - 9.6, 10, 11, 12, 13;\nMySQL - 5.7, 8;\nSQLite - 3.15+\n \"\"\"\n\n @provide_session\n def check(self, session=None):\n\n more_info = \"See link below for more details: https://github.com/apache/airflow#requirements\"\n\n conn_str = conf.get(section=\"core\", key=\"sql_alchemy_conn\")\n\n if \"sqlite\" in conn_str:\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n return \"From Airflow 2.0, SQLite version below 3.15 is no longer supported. \\n\" + more_info\n\n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n return \"From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \\n\" + more_info\n\n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\n if installed_mysql_version < min_req_mysql_version:\n return \"From Airflow 2.0, MySQL version below 5.7 is no longer supported. \\n\" + more_info\n", "path": "airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py"}]}
| 2,308 | 486 |
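The failure in this row comes down to `packaging.version.Version` rejecting MySQL's suffixed version string (`5.7.26-log`). A minimal sketch of the parsing step applied by the golden diff; the function name is illustrative rather than Airflow's API:

```python
from packaging.version import Version


def parse_db_server_version(raw_version):
    # MySQL may report a suffixed string such as '5.7.26-log', which
    # packaging.version rejects, so keep only the part before the first '-'.
    return Version(raw_version.split('-')[0])


assert parse_db_server_version('5.7.26-log') >= Version('5.7')
assert parse_db_server_version('8.0.23') >= Version('5.7')
```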
gh_patches_debug_9223
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-685
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Restore pex.pex_bootstrapper.is_compressed API
#680 broke https://github.com/wickman/lambdex, which depends on this removed function. https://github.com/wickman/lambdex/issues/5 is filed to remove this unnecessary dependency going forward.
</issue>
<code>
[start of pex/pex_bootstrapper.py]
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import absolute_import, print_function
5
6 import os
7 import sys
8
9 from pex import pex_warnings
10 from pex.common import die
11 from pex.executor import Executor
12 from pex.interpreter import PythonInterpreter
13 from pex.interpreter_constraints import matched_interpreters
14 from pex.tracer import TRACER
15 from pex.variables import ENV
16
17 __all__ = ('bootstrap_pex',)
18
19
20 def find_in_path(target_interpreter):
21 if os.path.exists(target_interpreter):
22 return target_interpreter
23
24 for directory in os.getenv('PATH', '').split(os.pathsep):
25 try_path = os.path.join(directory, target_interpreter)
26 if os.path.exists(try_path):
27 return try_path
28
29
30 def find_compatible_interpreters(pex_python_path, compatibility_constraints):
31 """Find all compatible interpreters on the system within the supplied constraints and use
32 PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.
33 """
34 if pex_python_path:
35 interpreters = []
36 for binary in pex_python_path.split(os.pathsep):
37 try:
38 interpreters.append(PythonInterpreter.from_binary(binary))
39 except Executor.ExecutionError:
40 print("Python interpreter %s in PEX_PYTHON_PATH failed to load properly." % binary,
41 file=sys.stderr)
42 if not interpreters:
43 die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. Exiting.')
44 else:
45 if not os.getenv('PATH', ''):
46 # no $PATH, use sys.executable
47 interpreters = [PythonInterpreter.get()]
48 else:
49 # get all qualifying interpreters found in $PATH
50 interpreters = PythonInterpreter.all()
51
52 return list(
53 matched_interpreters(interpreters, compatibility_constraints)
54 if compatibility_constraints
55 else interpreters
56 )
57
58
59 def _select_pex_python_interpreter(target_python, compatibility_constraints):
60 target = find_in_path(target_python)
61
62 if not target:
63 die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)
64 if compatibility_constraints:
65 pi = PythonInterpreter.from_binary(target)
66 if not list(matched_interpreters([pi], compatibility_constraints)):
67 die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '
68 'interpreter constraints: %s' % (target, str(compatibility_constraints)))
69 if not os.path.exists(target):
70 die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' % target)
71 return target
72
73
74 def _select_interpreter(pex_python_path, compatibility_constraints):
75 compatible_interpreters = find_compatible_interpreters(
76 pex_python_path, compatibility_constraints)
77
78 if not compatible_interpreters:
79 die('Failed to find compatible interpreter for constraints: %s'
80 % str(compatibility_constraints))
81 # TODO: https://github.com/pantsbuild/pex/issues/430
82 target = min(compatible_interpreters).binary
83
84 if os.path.exists(target):
85 return target
86
87
88 def maybe_reexec_pex(compatibility_constraints):
89 """
90 Handle environment overrides for the Python interpreter to use when executing this pex.
91
92 This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO
93 metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the
94 interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for
95 a matching interpreter in accordance with the interpreter constraints. If both variables are
96 present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no
97 compatible interpreters can be found on said path.
98
99 If neither variable is set, we fall back to plain PEX execution using PATH searching or the
100 currently executing interpreter. If compatibility constraints are used, we match those constraints
101 against these interpreters.
102
103 :param compatibility_constraints: list of requirements-style strings that constrain the
104 Python interpreter to re-exec this pex with.
105 """
106 if os.environ.pop('SHOULD_EXIT_BOOTSTRAP_REEXEC', None):
107 # We've already been here and selected an interpreter. Continue to execution.
108 return
109
110 target = None
111 with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):
112 if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:
113 # preserve PEX_PYTHON re-exec for backwards compatibility
114 # TODO: Kill this off completely in favor of PEX_PYTHON_PATH
115 # https://github.com/pantsbuild/pex/issues/431
116 target = _select_pex_python_interpreter(ENV.PEX_PYTHON,
117 compatibility_constraints)
118 elif ENV.PEX_PYTHON_PATH:
119 target = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)
120
121 elif compatibility_constraints:
122 # Apply constraints to target using regular PATH
123 target = _select_interpreter(
124 pex_python_path=None,
125 compatibility_constraints=compatibility_constraints
126 )
127
128 if target and os.path.realpath(target) != os.path.realpath(sys.executable):
129 cmdline = [target] + sys.argv
130 TRACER.log('Re-executing: cmdline="%s", sys.executable="%s", PEX_PYTHON="%s", '
131 'PEX_PYTHON_PATH="%s", COMPATIBILITY_CONSTRAINTS="%s"'
132 % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,
133 compatibility_constraints))
134 ENV.delete('PEX_PYTHON')
135 ENV.delete('PEX_PYTHON_PATH')
136 os.environ['SHOULD_EXIT_BOOTSTRAP_REEXEC'] = '1'
137 os.execve(target, cmdline, ENV.copy())
138
139
140 def _bootstrap(entry_point):
141 from .pex_info import PexInfo
142 pex_info = PexInfo.from_pex(entry_point)
143 pex_warnings.configure_warnings(pex_info)
144
145 from .finders import register_finders
146 register_finders()
147
148 return pex_info
149
150
151 def bootstrap_pex(entry_point):
152 pex_info = _bootstrap(entry_point)
153 maybe_reexec_pex(pex_info.interpreter_constraints)
154
155 from . import pex
156 pex.PEX(entry_point).execute()
157
158
159 def bootstrap_pex_env(entry_point):
160 """Bootstrap the current runtime environment using a given pex."""
161 pex_info = _bootstrap(entry_point)
162
163 from .environment import PEXEnvironment
164 PEXEnvironment(entry_point, pex_info).activate()
165
[end of pex/pex_bootstrapper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py
--- a/pex/pex_bootstrapper.py
+++ b/pex/pex_bootstrapper.py
@@ -156,6 +156,13 @@
pex.PEX(entry_point).execute()
+# NB: This helper is used by third party libs - namely https://github.com/wickman/lambdex.
+# TODO(John Sirois): Kill once https://github.com/wickman/lambdex/issues/5 is resolved.
+def is_compressed(entry_point):
+ from .pex_info import PexInfo
+ return os.path.exists(entry_point) and not os.path.exists(os.path.join(entry_point, PexInfo.PATH))
+
+
def bootstrap_pex_env(entry_point):
"""Bootstrap the current runtime environment using a given pex."""
pex_info = _bootstrap(entry_point)
|
{"golden_diff": "diff --git a/pex/pex_bootstrapper.py b/pex/pex_bootstrapper.py\n--- a/pex/pex_bootstrapper.py\n+++ b/pex/pex_bootstrapper.py\n@@ -156,6 +156,13 @@\n pex.PEX(entry_point).execute()\n \n \n+# NB: This helper is used by third party libs - namely https://github.com/wickman/lambdex.\n+# TODO(John Sirois): Kill once https://github.com/wickman/lambdex/issues/5 is resolved.\n+def is_compressed(entry_point):\n+ from .pex_info import PexInfo\n+ return os.path.exists(entry_point) and not os.path.exists(os.path.join(entry_point, PexInfo.PATH))\n+\n+\n def bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n pex_info = _bootstrap(entry_point)\n", "issue": "Restore pex.pex_bootstrapper.is_compressed API\n#680 broke https://github.com/wickman/lambdex which has a dependency on this removed function. https://github.com/wickman/lambdex/issues/5 is filed to remove this un-necessary dependence going forward.\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\n\nfrom pex import pex_warnings\nfrom pex.common import die\nfrom pex.executor import Executor\nfrom pex.interpreter import PythonInterpreter\nfrom pex.interpreter_constraints import matched_interpreters\nfrom pex.tracer import TRACER\nfrom pex.variables import ENV\n\n__all__ = ('bootstrap_pex',)\n\n\ndef find_in_path(target_interpreter):\n if os.path.exists(target_interpreter):\n return target_interpreter\n\n for directory in os.getenv('PATH', '').split(os.pathsep):\n try_path = os.path.join(directory, target_interpreter)\n if os.path.exists(try_path):\n return try_path\n\n\ndef find_compatible_interpreters(pex_python_path, compatibility_constraints):\n \"\"\"Find all compatible interpreters on the system within the supplied constraints and use\n PEX_PYTHON_PATH if it is set. If not, fall back to interpreters on $PATH.\n \"\"\"\n if pex_python_path:\n interpreters = []\n for binary in pex_python_path.split(os.pathsep):\n try:\n interpreters.append(PythonInterpreter.from_binary(binary))\n except Executor.ExecutionError:\n print(\"Python interpreter %s in PEX_PYTHON_PATH failed to load properly.\" % binary,\n file=sys.stderr)\n if not interpreters:\n die('PEX_PYTHON_PATH was defined, but no valid interpreters could be identified. Exiting.')\n else:\n if not os.getenv('PATH', ''):\n # no $PATH, use sys.executable\n interpreters = [PythonInterpreter.get()]\n else:\n # get all qualifying interpreters found in $PATH\n interpreters = PythonInterpreter.all()\n\n return list(\n matched_interpreters(interpreters, compatibility_constraints)\n if compatibility_constraints\n else interpreters\n )\n\n\ndef _select_pex_python_interpreter(target_python, compatibility_constraints):\n target = find_in_path(target_python)\n\n if not target:\n die('Failed to find interpreter specified by PEX_PYTHON: %s' % target)\n if compatibility_constraints:\n pi = PythonInterpreter.from_binary(target)\n if not list(matched_interpreters([pi], compatibility_constraints)):\n die('Interpreter specified by PEX_PYTHON (%s) is not compatible with specified '\n 'interpreter constraints: %s' % (target, str(compatibility_constraints)))\n if not os.path.exists(target):\n die('Target interpreter specified by PEX_PYTHON %s does not exist. Exiting.' 
% target)\n return target\n\n\ndef _select_interpreter(pex_python_path, compatibility_constraints):\n compatible_interpreters = find_compatible_interpreters(\n pex_python_path, compatibility_constraints)\n\n if not compatible_interpreters:\n die('Failed to find compatible interpreter for constraints: %s'\n % str(compatibility_constraints))\n # TODO: https://github.com/pantsbuild/pex/issues/430\n target = min(compatible_interpreters).binary\n\n if os.path.exists(target):\n return target\n\n\ndef maybe_reexec_pex(compatibility_constraints):\n \"\"\"\n Handle environment overrides for the Python interpreter to use when executing this pex.\n\n This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO\n metadata. If PEX_PYTHON is set in a pexrc, it attempts to obtain the binary location of the\n interpreter specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for\n a matching interpreter in accordance with the interpreter constraints. If both variables are\n present in a pexrc, this function gives precedence to PEX_PYTHON_PATH and errors out if no\n compatible interpreters can be found on said path.\n\n If neither variable is set, we fall back to plain PEX execution using PATH searching or the\n currently executing interpreter. If compatibility constraints are used, we match those constraints\n against these interpreters.\n\n :param compatibility_constraints: list of requirements-style strings that constrain the\n Python interpreter to re-exec this pex with.\n \"\"\"\n if os.environ.pop('SHOULD_EXIT_BOOTSTRAP_REEXEC', None):\n # We've already been here and selected an interpreter. Continue to execution.\n return\n\n target = None\n with TRACER.timed('Selecting runtime interpreter based on pexrc', V=3):\n if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:\n # preserve PEX_PYTHON re-exec for backwards compatibility\n # TODO: Kill this off completely in favor of PEX_PYTHON_PATH\n # https://github.com/pantsbuild/pex/issues/431\n target = _select_pex_python_interpreter(ENV.PEX_PYTHON,\n compatibility_constraints)\n elif ENV.PEX_PYTHON_PATH:\n target = _select_interpreter(ENV.PEX_PYTHON_PATH, compatibility_constraints)\n\n elif compatibility_constraints:\n # Apply constraints to target using regular PATH\n target = _select_interpreter(\n pex_python_path=None,\n compatibility_constraints=compatibility_constraints\n )\n\n if target and os.path.realpath(target) != os.path.realpath(sys.executable):\n cmdline = [target] + sys.argv\n TRACER.log('Re-executing: cmdline=\"%s\", sys.executable=\"%s\", PEX_PYTHON=\"%s\", '\n 'PEX_PYTHON_PATH=\"%s\", COMPATIBILITY_CONSTRAINTS=\"%s\"'\n % (cmdline, sys.executable, ENV.PEX_PYTHON, ENV.PEX_PYTHON_PATH,\n compatibility_constraints))\n ENV.delete('PEX_PYTHON')\n ENV.delete('PEX_PYTHON_PATH')\n os.environ['SHOULD_EXIT_BOOTSTRAP_REEXEC'] = '1'\n os.execve(target, cmdline, ENV.copy())\n\n\ndef _bootstrap(entry_point):\n from .pex_info import PexInfo\n pex_info = PexInfo.from_pex(entry_point)\n pex_warnings.configure_warnings(pex_info)\n\n from .finders import register_finders\n register_finders()\n\n return pex_info\n\n\ndef bootstrap_pex(entry_point):\n pex_info = _bootstrap(entry_point)\n maybe_reexec_pex(pex_info.interpreter_constraints)\n\n from . 
import pex\n pex.PEX(entry_point).execute()\n\n\ndef bootstrap_pex_env(entry_point):\n \"\"\"Bootstrap the current runtime environment using a given pex.\"\"\"\n pex_info = _bootstrap(entry_point)\n\n from .environment import PEXEnvironment\n PEXEnvironment(entry_point, pex_info).activate()\n", "path": "pex/pex_bootstrapper.py"}]}
| 2,443 | 199 |
gh_patches_debug_12748
|
rasdani/github-patches
|
git_diff
|
searx__searx-233
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Requests does not follow on an HTTP 301
We already had an issue when kickass redirected from https to http, if I remember correctly (or it was on another engine). The redirect wasn't followed, so we got an empty response with status_code 301 in the engine.
Anyway, that is still the case on kickass if we try to perform a search when the query contains capitalized letters.
So why isn't the 301 followed? Is there a way to force it?
</issue>
<code>
[start of searx/engines/kickass.py]
1 ## Kickass Torrent (Videos, Music, Files)
2 #
3 # @website https://kickass.so
4 # @provide-api no (nothing found)
5 #
6 # @using-api no
7 # @results HTML (using search portal)
8 # @stable yes (HTML can change)
9 # @parse url, title, content, seed, leech, magnetlink
10
11 from urlparse import urljoin
12 from cgi import escape
13 from urllib import quote
14 from lxml import html
15 from operator import itemgetter
16 from searx.engines.xpath import extract_text
17
18 # engine dependent config
19 categories = ['videos', 'music', 'files']
20 paging = True
21
22 # search-url
23 url = 'https://kickass.so/'
24 search_url = url + 'search/{search_term}/{pageno}/'
25
26 # specific xpath variables
27 magnet_xpath = './/a[@title="Torrent magnet link"]'
28 torrent_xpath = './/a[@title="Download torrent file"]'
29 content_xpath = './/span[@class="font11px lightgrey block"]'
30
31
32 # do search-request
33 def request(query, params):
34 params['url'] = search_url.format(search_term=quote(query),
35 pageno=params['pageno'])
36
37 # FIX: SSLError: hostname 'kickass.so'
38 # doesn't match either of '*.kickass.to', 'kickass.to'
39 params['verify'] = False
40
41 return params
42
43
44 # get response from search-request
45 def response(resp):
46 results = []
47
48 dom = html.fromstring(resp.text)
49
50 search_res = dom.xpath('//table[@class="data"]//tr')
51
52 # return empty array if nothing is found
53 if not search_res:
54 return []
55
56 # parse results
57 for result in search_res[1:]:
58 link = result.xpath('.//a[@class="cellMainLink"]')[0]
59 href = urljoin(url, link.attrib['href'])
60 title = extract_text(link)
61 content = escape(extract_text(result.xpath(content_xpath)))
62 seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
63 leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
64 filesize = result.xpath('.//td[contains(@class, "nobr")]/text()')[0]
65 filesize_multiplier = result.xpath('.//td[contains(@class, "nobr")]//span/text()')[0]
66 files = result.xpath('.//td[contains(@class, "center")][2]/text()')[0]
67
68 # convert seed to int if possible
69 if seed.isdigit():
70 seed = int(seed)
71 else:
72 seed = 0
73
74 # convert leech to int if possible
75 if leech.isdigit():
76 leech = int(leech)
77 else:
78 leech = 0
79
80 # convert filesize to byte if possible
81 try:
82 filesize = float(filesize)
83
84 # convert filesize to byte
85 if filesize_multiplier == 'TB':
86 filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
87 elif filesize_multiplier == 'GB':
88 filesize = int(filesize * 1024 * 1024 * 1024)
89 elif filesize_multiplier == 'MB':
90 filesize = int(filesize * 1024 * 1024)
91 elif filesize_multiplier == 'KB':
92 filesize = int(filesize * 1024)
93 except:
94 filesize = None
95
96 # convert files to int if possible
97 if files.isdigit():
98 files = int(files)
99 else:
100 files = None
101
102 magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
103
104 torrentfile = result.xpath(torrent_xpath)[0].attrib['href']
105 torrentfileurl = quote(torrentfile, safe="%/:=&?~#+!$,;'@()*")
106
107 # append result
108 results.append({'url': href,
109 'title': title,
110 'content': content,
111 'seed': seed,
112 'leech': leech,
113 'filesize': filesize,
114 'files': files,
115 'magnetlink': magnetlink,
116 'torrentfile': torrentfileurl,
117 'template': 'torrent.html'})
118
119 # return results sorted by seeder
120 return sorted(results, key=itemgetter('seed'), reverse=True)
121
[end of searx/engines/kickass.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -20,7 +20,7 @@
paging = True
# search-url
-url = 'https://kickass.so/'
+url = 'https://kickass.to/'
search_url = url + 'search/{search_term}/{pageno}/'
# specific xpath variables
@@ -45,6 +45,11 @@
def response(resp):
results = []
+ # check if redirect comparing to the True value,
+ # because resp can be a Mock object, and any attribut name returns something.
+ if resp.is_redirect is True:
+ return results
+
dom = html.fromstring(resp.text)
search_res = dom.xpath('//table[@class="data"]//tr')
|
{"golden_diff": "diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py\n--- a/searx/engines/kickass.py\n+++ b/searx/engines/kickass.py\n@@ -20,7 +20,7 @@\n paging = True\n \n # search-url\n-url = 'https://kickass.so/'\n+url = 'https://kickass.to/'\n search_url = url + 'search/{search_term}/{pageno}/'\n \n # specific xpath variables\n@@ -45,6 +45,11 @@\n def response(resp):\n results = []\n \n+ # check if redirect comparing to the True value,\n+ # because resp can be a Mock object, and any attribut name returns something.\n+ if resp.is_redirect is True:\n+ return results\n+\n dom = html.fromstring(resp.text)\n \n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n", "issue": "Requests does not follow on an HTTP 301\nWe had already an issue when kickass redirected from https to http I seem to remember. Or it was on another engine. And the request wasn't followed so we had an empty response with status_code 301 in the engine.\n\nAnyway, that is still the case in kickass if we try to perform a search when the query contains capitalized letters.\n\nSo why isn't 301 followed ? Is there a way to force it ?\n\n", "before_files": [{"content": "## Kickass Torrent (Videos, Music, Files)\n#\n# @website https://kickass.so\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://kickass.so/'\nsearch_url = url + 'search/{search_term}/{pageno}/'\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Torrent magnet link\"]'\ntorrent_xpath = './/a[@title=\"Download torrent file\"]'\ncontent_xpath = './/span[@class=\"font11px lightgrey block\"]'\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(search_term=quote(query),\n pageno=params['pageno'])\n\n # FIX: SSLError: hostname 'kickass.so'\n # doesn't match either of '*.kickass.to', 'kickass.to'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//a[@class=\"cellMainLink\"]')[0]\n href = urljoin(url, link.attrib['href'])\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed = result.xpath('.//td[contains(@class, \"green\")]/text()')[0]\n leech = result.xpath('.//td[contains(@class, \"red\")]/text()')[0]\n filesize = result.xpath('.//td[contains(@class, \"nobr\")]/text()')[0]\n filesize_multiplier = result.xpath('.//td[contains(@class, \"nobr\")]//span/text()')[0]\n files = result.xpath('.//td[contains(@class, \"center\")][2]/text()')[0]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n # convert filesize to byte if possible\n try:\n filesize = float(filesize)\n\n # convert filesize to byte\n if filesize_multiplier == 'TB':\n filesize = int(filesize * 1024 * 1024 * 1024 * 
1024)\n elif filesize_multiplier == 'GB':\n filesize = int(filesize * 1024 * 1024 * 1024)\n elif filesize_multiplier == 'MB':\n filesize = int(filesize * 1024 * 1024)\n elif filesize_multiplier == 'KB':\n filesize = int(filesize * 1024)\n except:\n filesize = None\n\n # convert files to int if possible\n if files.isdigit():\n files = int(files)\n else:\n files = None\n\n magnetlink = result.xpath(magnet_xpath)[0].attrib['href']\n\n torrentfile = result.xpath(torrent_xpath)[0].attrib['href']\n torrentfileurl = quote(torrentfile, safe=\"%/:=&?~#+!$,;'@()*\")\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'filesize': filesize,\n 'files': files,\n 'magnetlink': magnetlink,\n 'torrentfile': torrentfileurl,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/kickass.py"}]}
| 1,851 | 206 |
gh_patches_debug_5809
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-728
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValueError when saving geodataframe with pandas timestamps to shapefile
Is there any proper way to fix this error inside geopandas (not fiona)?
(Or maybe I'm trying to do the wrong thing and for example should convert my timestamps into strings before writing?)
```
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\geopandas-0.1.0.dev_6163bc9-py3.4.egg\geopandas\geodataframe.py in to_file(self, filename, driver, schema, **kwargs)
326 """
327 from geopandas.io.file import to_file
--> 328 to_file(self, filename, driver, schema, **kwargs)
329
330 def to_crs(self, crs=None, epsg=None, inplace=False):
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\geopandas-0.1.0.dev_6163bc9-py3.4.egg\geopandas\io\file.py in to_file(df, filename, driver, schema, **kwargs)
60 schema=schema, **kwargs) as c:
61 for feature in df.iterfeatures():
---> 62 c.write(feature)
63
64
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\fiona\collection.py in write(self, record)
323 def write(self, record):
324 """Stages a record for writing to disk."""
--> 325 self.writerecords([record])
326
327 def validate_record(self, record):
C:\Users\kkrasnoschekov\_soft\WinPython-64bit-3.4.3.2_1\python-3.4.3.amd64\lib\site-packages\fiona\collection.py in writerecords(self, records)
317 if self.mode not in ('a', 'w'):
318 raise IOError("collection not open for writing")
--> 319 self.session.writerecs(records, self)
320 self._len = self.session.get_length()
321 self._bounds = self.session.get_extent()
fiona/ogrext.pyx in fiona.ogrext.WritingSession.writerecs (fiona/ogrext.c:15333)()
fiona/ogrext.pyx in fiona.ogrext.OGRFeatureBuilder.build (fiona/ogrext.c:5751)()
ValueError: Invalid field type <class 'pandas.tslib.Timestamp'>
```
</issue>
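A minimal sketch of the string-conversion workaround the question hints at, assuming `gdf` is the GeoDataFrame being written; this is illustrative only and not the eventual geopandas fix:

```python
# Cast datetime64 columns to ISO strings so fiona receives plain `str`
# properties instead of pandas Timestamp objects.
for column, dtype in gdf.dtypes.items():
    if str(dtype).startswith('datetime64'):
        gdf[column] = gdf[column].dt.strftime('%Y-%m-%dT%H:%M:%S')

gdf.to_file('output.shp')
```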
<code>
[start of geopandas/io/file.py]
1 import os
2 from distutils.version import LooseVersion
3
4 import fiona
5 import numpy as np
6
7 import six
8
9 try:
10 from fiona import Env as fiona_env
11 except ImportError:
12 from fiona import drivers as fiona_env
13
14 from geopandas import GeoDataFrame, GeoSeries
15
16
17 _FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion('1.8')
18
19
20 # Adapted from pandas.io.common
21 if six.PY3:
22 from urllib.request import urlopen as _urlopen
23 from urllib.parse import urlparse as parse_url
24 from urllib.parse import uses_relative, uses_netloc, uses_params
25 else:
26 from urllib2 import urlopen as _urlopen
27 from urlparse import urlparse as parse_url
28 from urlparse import uses_relative, uses_netloc, uses_params
29
30 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
31 _VALID_URLS.discard('')
32
33
34 def _is_url(url):
35 """Check to see if *url* has a valid protocol."""
36 try:
37 return parse_url(url).scheme in _VALID_URLS
38 except:
39 return False
40
41
42 def read_file(filename, bbox=None, **kwargs):
43 """
44 Returns a GeoDataFrame from a file or URL.
45
46 Parameters
47 ----------
48 filename: str
49 Either the absolute or relative path to the file or URL to
50 be opened.
51 bbox : tuple | GeoDataFrame or GeoSeries, default None
52 Filter features by given bounding box, GeoSeries, or GeoDataFrame.
53 CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
54 **kwargs:
55 Keyword args to be passed to the `open` or `BytesCollection` method
56 in the fiona library when opening the file. For more information on
57 possible keywords, type:
58 ``import fiona; help(fiona.open)``
59
60 Examples
61 --------
62 >>> df = geopandas.read_file("nybb.shp")
63
64 Returns
65 -------
66 geodataframe : GeoDataFrame
67 """
68 if _is_url(filename):
69 req = _urlopen(filename)
70 path_or_bytes = req.read()
71 reader = fiona.BytesCollection
72 else:
73 path_or_bytes = filename
74 reader = fiona.open
75
76 with fiona_env():
77 with reader(path_or_bytes, **kwargs) as features:
78
79 # In a future Fiona release the crs attribute of features will
80 # no longer be a dict. The following code will be both forward
81 # and backward compatible.
82 if hasattr(features.crs, 'to_dict'):
83 crs = features.crs.to_dict()
84 else:
85 crs = features.crs
86
87 if bbox is not None:
88 if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
89 bbox = tuple(bbox.to_crs(crs).total_bounds)
90 assert len(bbox) == 4
91 f_filt = features.filter(bbox=bbox)
92 else:
93 f_filt = features
94
95 columns = list(features.meta["schema"]["properties"]) + ["geometry"]
96 gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)
97
98 return gdf
99
100
101 def to_file(df, filename, driver="ESRI Shapefile", schema=None,
102 **kwargs):
103 """
104 Write this GeoDataFrame to an OGR data source
105
106 A dictionary of supported OGR providers is available via:
107 >>> import fiona
108 >>> fiona.supported_drivers
109
110 Parameters
111 ----------
112 df : GeoDataFrame to be written
113 filename : string
114 File path or file handle to write to.
115 driver : string, default 'ESRI Shapefile'
116 The OGR format driver used to write the vector file.
117 schema : dict, default None
118 If specified, the schema dictionary is passed to Fiona to
119 better control how the file is written. If None, GeoPandas
120 will determine the schema based on each column's dtype
121
122 The *kwargs* are passed to fiona.open and can be used to write
123 to multi-layer data, store data within archives (zip files), etc.
124 """
125 if schema is None:
126 schema = infer_schema(df)
127 filename = os.path.abspath(os.path.expanduser(filename))
128 with fiona_env():
129 with fiona.open(filename, 'w', driver=driver, crs=df.crs,
130 schema=schema, **kwargs) as colxn:
131 colxn.writerecords(df.iterfeatures())
132
133
134 def infer_schema(df):
135 try:
136 from collections import OrderedDict
137 except ImportError:
138 from ordereddict import OrderedDict
139
140 def convert_type(column, in_type):
141 if in_type == object:
142 return 'str'
143 out_type = type(np.zeros(1, in_type).item()).__name__
144 if out_type == 'long':
145 out_type = 'int'
146 if not _FIONA18 and out_type == 'bool':
147 raise ValueError('column "{}" is boolean type, '.format(column) +
148 'which is unsupported in file writing with fiona '
149 '< 1.8. Consider casting the column to int type.')
150 return out_type
151
152 properties = OrderedDict([
153 (col, convert_type(col, _type)) for col, _type in
154 zip(df.columns, df.dtypes) if col != df._geometry_column_name
155 ])
156
157 if df.empty:
158 raise ValueError("Cannot write empty DataFrame to file.")
159
160 # Since https://github.com/Toblerity/Fiona/issues/446 resolution,
161 # Fiona allows a list of geometry types
162 geom_types = _geometry_types(df)
163
164 schema = {'geometry': geom_types, 'properties': properties}
165
166 return schema
167
168
169 def _geometry_types(df):
170 """
171 Determine the geometry types in the GeoDataFrame for the schema.
172 """
173 if _FIONA18:
174 # Starting from Fiona 1.8, schema submitted to fiona to write a gdf
175 # can have mixed geometries:
176 # - 3D and 2D shapes can coexist in inferred schema
177 # - Shape and MultiShape types can (and must) coexist in inferred
178 # schema
179 geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()
180 geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]
181 geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()
182 geom_types_3D = ["3D " + gtype for gtype in geom_types_3D
183 if gtype is not None]
184 geom_types = geom_types_3D + geom_types_2D
185
186 else:
187 # Before Fiona 1.8, schema submitted to write a gdf should have
188 # one single geometry type whenever possible:
189 # - 3D and 2D shapes cannot coexist in inferred schema
190 # - Shape and MultiShape can not coexist in inferred schema
191 geom_types = _geometry_types_back_compat(df)
192
193 if len(geom_types) == 0:
194 # Default geometry type supported by Fiona
195 # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)
196 return 'Unknown'
197
198 if len(geom_types) == 1:
199 geom_types = geom_types[0]
200
201 return geom_types
202
203
204 def _geometry_types_back_compat(df):
205 """
206 for backward compatibility with Fiona<1.8 only
207 """
208 unique_geom_types = df.geometry.geom_type.unique()
209 unique_geom_types = [
210 gtype for gtype in unique_geom_types if gtype is not None]
211
212 # merge single and Multi types (eg Polygon and MultiPolygon)
213 unique_geom_types = [
214 gtype for gtype in unique_geom_types
215 if not gtype.startswith('Multi') or gtype[5:] not in unique_geom_types]
216
217 if df.geometry.has_z.any():
218 # declare all geometries as 3D geometries
219 unique_geom_types = ["3D " + type for type in unique_geom_types]
220 # by default, all geometries are 2D geometries
221
222 return unique_geom_types
223
[end of geopandas/io/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -140,6 +140,9 @@
def convert_type(column, in_type):
if in_type == object:
return 'str'
+ if in_type.name.startswith('datetime64'):
+ # numpy datetime type regardless of frequency
+ return 'datetime'
out_type = type(np.zeros(1, in_type).item()).__name__
if out_type == 'long':
out_type = 'int'
|
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -140,6 +140,9 @@\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n+ if in_type.name.startswith('datetime64'):\n+ # numpy datetime type regardless of frequency\n+ return 'datetime'\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == 'long':\n out_type = 'int'\n", "issue": "ValueError when saving geodataframe with pandas timestamps to shapefile\nIs there any proper way to fix this error inside geopandas (not fiona)?\n\n(Or maybe I'm trying to do the wrong thing and for example should convert my timestamps into strings before writing?)\n\n```\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\geopandas-0.1.0.dev_6163bc9-py3.4.egg\\geopandas\\geodataframe.py in to_file(self, filename, driver, schema, **kwargs)\n 326 \"\"\"\n 327 from geopandas.io.file import to_file\n--> 328 to_file(self, filename, driver, schema, **kwargs)\n 329 \n 330 def to_crs(self, crs=None, epsg=None, inplace=False):\n\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\geopandas-0.1.0.dev_6163bc9-py3.4.egg\\geopandas\\io\\file.py in to_file(df, filename, driver, schema, **kwargs)\n 60 schema=schema, **kwargs) as c:\n 61 for feature in df.iterfeatures():\n---> 62 c.write(feature)\n 63 \n 64 \n\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\fiona\\collection.py in write(self, record)\n 323 def write(self, record):\n 324 \"\"\"Stages a record for writing to disk.\"\"\"\n--> 325 self.writerecords([record])\n 326 \n 327 def validate_record(self, record):\n\nC:\\Users\\kkrasnoschekov\\_soft\\WinPython-64bit-3.4.3.2_1\\python-3.4.3.amd64\\lib\\site-packages\\fiona\\collection.py in writerecords(self, records)\n 317 if self.mode not in ('a', 'w'):\n 318 raise IOError(\"collection not open for writing\")\n--> 319 self.session.writerecs(records, self)\n 320 self._len = self.session.get_length()\n 321 self._bounds = self.session.get_extent()\n\nfiona/ogrext.pyx in fiona.ogrext.WritingSession.writerecs (fiona/ogrext.c:15333)()\n\nfiona/ogrext.pyx in fiona.ogrext.OGRFeatureBuilder.build (fiona/ogrext.c:5751)()\n\nValueError: Invalid field type <class 'pandas.tslib.Timestamp'>\n```\n\n", "before_files": [{"content": "import os\nfrom distutils.version import LooseVersion\n\nimport fiona\nimport numpy as np\n\nimport six\n\ntry:\n from fiona import Env as fiona_env\nexcept ImportError:\n from fiona import drivers as fiona_env\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\n\n_FIONA18 = LooseVersion(fiona.__version__) >= LooseVersion('1.8')\n\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n 
Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with fiona_env():\n with reader(path_or_bytes, **kwargs) as features:\n\n # In a future Fiona release the crs attribute of features will\n # no longer be a dict. The following code will be both forward\n # and backward compatible.\n if hasattr(features.crs, 'to_dict'):\n crs = features.crs.to_dict()\n else:\n crs = features.crs\n\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona_env():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.zeros(1, in_type).item()).__name__\n if out_type == 'long':\n out_type = 'int'\n if not _FIONA18 and out_type == 'bool':\n raise ValueError('column \"{}\" is boolean type, '.format(column) +\n 'which is unsupported in file writing with fiona '\n '< 1.8. 
Consider casting the column to int type.')\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n # Since https://github.com/Toblerity/Fiona/issues/446 resolution,\n # Fiona allows a list of geometry types\n geom_types = _geometry_types(df)\n\n schema = {'geometry': geom_types, 'properties': properties}\n\n return schema\n\n\ndef _geometry_types(df):\n \"\"\"\n Determine the geometry types in the GeoDataFrame for the schema.\n \"\"\"\n if _FIONA18:\n # Starting from Fiona 1.8, schema submitted to fiona to write a gdf\n # can have mixed geometries:\n # - 3D and 2D shapes can coexist in inferred schema\n # - Shape and MultiShape types can (and must) coexist in inferred\n # schema\n geom_types_2D = df[~df.geometry.has_z].geometry.geom_type.unique()\n geom_types_2D = [gtype for gtype in geom_types_2D if gtype is not None]\n geom_types_3D = df[df.geometry.has_z].geometry.geom_type.unique()\n geom_types_3D = [\"3D \" + gtype for gtype in geom_types_3D\n if gtype is not None]\n geom_types = geom_types_3D + geom_types_2D\n\n else:\n # Before Fiona 1.8, schema submitted to write a gdf should have\n # one single geometry type whenever possible:\n # - 3D and 2D shapes cannot coexist in inferred schema\n # - Shape and MultiShape can not coexist in inferred schema\n geom_types = _geometry_types_back_compat(df)\n\n if len(geom_types) == 0:\n # Default geometry type supported by Fiona\n # (Since https://github.com/Toblerity/Fiona/issues/446 resolution)\n return 'Unknown'\n\n if len(geom_types) == 1:\n geom_types = geom_types[0]\n\n return geom_types\n\n\ndef _geometry_types_back_compat(df):\n \"\"\"\n for backward compatibility with Fiona<1.8 only\n \"\"\"\n unique_geom_types = df.geometry.geom_type.unique()\n unique_geom_types = [\n gtype for gtype in unique_geom_types if gtype is not None]\n\n # merge single and Multi types (eg Polygon and MultiPolygon)\n unique_geom_types = [\n gtype for gtype in unique_geom_types\n if not gtype.startswith('Multi') or gtype[5:] not in unique_geom_types]\n\n if df.geometry.has_z.any():\n # declare all geometries as 3D geometries\n unique_geom_types = [\"3D \" + type for type in unique_geom_types]\n # by default, all geometries are 2D geometries\n\n return unique_geom_types\n", "path": "geopandas/io/file.py"}]}
| 3,585 | 132 |
gh_patches_debug_21270
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-210
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add 'About' page content
Using content from the existing website.
</issue>
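For context, the page content itself lives in Django templates, while the accompanying diff wires each new page up as a class-based view plus a URL pattern. A minimal sketch of that pattern is shown here (the view name, template path, and URL name follow the diff further down; this is illustrative only, not the full patch):

```
from django.conf.urls import url
from django.views.generic import TemplateView

class GeneralPeopleView(TemplateView):
    """View for the people page that renders from a template."""
    template_name = 'general/people.html'

urlpatterns = [
    url(r'^people/$', GeneralPeopleView.as_view(), name='people'),
]
```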
<code>
[start of csunplugged/general/urls.py]
1 """URL routing for the general application."""
2
3 from django.conf.urls import url
4
5 from . import views
6
7 urlpatterns = [
8 url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
9 url(r'^$', views.GeneralIndexView.as_view(), name='home'),
10 ]
11
[end of csunplugged/general/urls.py]
[start of csunplugged/general/views.py]
1 """Views for the general application."""
2
3 from django.views.generic import TemplateView
4 from django.http import HttpResponse
5
6
7 class GeneralIndexView(TemplateView):
8 """View for the homepage that renders from a template."""
9
10 template_name = 'general/index.html'
11
12
13 class GeneralAboutView(TemplateView):
14 """View for the about page that renders from a template."""
15
16 template_name = 'general/about.html'
17
18
19 def health_check(request):
20 """Return heath check response for Google App Engine.
21
22 Returns a 200 HTTP response for Google App Engine to detect the system
23 is running.
24 """
25 return HttpResponse(status=200)
26
[end of csunplugged/general/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py
--- a/csunplugged/general/urls.py
+++ b/csunplugged/general/urls.py
@@ -6,5 +6,8 @@
urlpatterns = [
url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),
+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),
+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),
url(r'^$', views.GeneralIndexView.as_view(), name='home'),
]
diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -16,6 +16,24 @@
template_name = 'general/about.html'
+class GeneralContactView(TemplateView):
+ """View for the contact page that renders from a template."""
+
+ template_name = 'general/contact.html'
+
+
+class GeneralPeopleView(TemplateView):
+ """View for the people page that renders from a template."""
+
+ template_name = 'general/people.html'
+
+
+class GeneralPrinciplesView(TemplateView):
+ """View for the princples page that renders from a template."""
+
+ template_name = 'general/principles.html'
+
+
def health_check(request):
"""Return heath check response for Google App Engine.
|
{"golden_diff": "diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py\n--- a/csunplugged/general/urls.py\n+++ b/csunplugged/general/urls.py\n@@ -6,5 +6,8 @@\n \n urlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),\n+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),\n+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n ]\ndiff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -16,6 +16,24 @@\n template_name = 'general/about.html'\n \n \n+class GeneralContactView(TemplateView):\n+ \"\"\"View for the contact page that renders from a template.\"\"\"\n+\n+ template_name = 'general/contact.html'\n+\n+\n+class GeneralPeopleView(TemplateView):\n+ \"\"\"View for the people page that renders from a template.\"\"\"\n+\n+ template_name = 'general/people.html'\n+\n+\n+class GeneralPrinciplesView(TemplateView):\n+ \"\"\"View for the princples page that renders from a template.\"\"\"\n+\n+ template_name = 'general/principles.html'\n+\n+\n def health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n", "issue": "Add 'About' page content\nUsing content from existing website.\n", "before_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = 'general/index.html'\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = 'general/about.html'\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]}
| 830 | 345 |
gh_patches_debug_26686
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-24878
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImportError in bench get-untranslated: Circular Import Issue
I encountered an ImportError while running the '**bench get-untranslated**' command in Frappe. This issue seems to be related to a circular import in the '**frappe.translate**' module. When I execute '**bench get-untranslated RU RU_UNTRANSLATED.TXT**', the system throws an ImportError, stating that it cannot import '**get_all_translations**' from the partially initialized module '**frappe.translate**'. This error points towards a potential circular import problem.
### Steps to Reproduce:
1. Run in Frappe Bench environment:
> bench get-untranslated RU RU_UNTRANSLATED.TXT
2. Observe the ImportError related to 'get_all_translations'.
I suspect this might be due to a cyclical dependency within Frappe's translate module. Any insights or fixes would be greatly appreciated.
</issue>
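For background, the standard ways to break such a cycle are either to stop doing work that needs the other module at import time, or to defer the import into the function that uses it. The diff further down takes the first route by replacing the module-level `_()` translation calls in `frappe/model/__init__.py` with plain strings; the second route would look roughly like the sketch below (the wrapper function name is a placeholder; only `frappe.translate.get_all_translations` is taken from the error message):

```
def build_untranslated_report(lang):
    # Deferred import: this module can now be imported while
    # frappe.translate is still being initialized, because the
    # dependency is only resolved when the function is called.
    from frappe.translate import get_all_translations

    return get_all_translations(lang)
```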
<code>
[start of frappe/model/__init__.py]
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # License: MIT. See LICENSE
3
4 # model __init__.py
5 import frappe
6 from frappe import _
7
8 data_fieldtypes = (
9 "Currency",
10 "Int",
11 "Long Int",
12 "Float",
13 "Percent",
14 "Check",
15 "Small Text",
16 "Long Text",
17 "Code",
18 "Text Editor",
19 "Markdown Editor",
20 "HTML Editor",
21 "Date",
22 "Datetime",
23 "Time",
24 "Text",
25 "Data",
26 "Link",
27 "Dynamic Link",
28 "Password",
29 "Select",
30 "Rating",
31 "Read Only",
32 "Attach",
33 "Attach Image",
34 "Signature",
35 "Color",
36 "Barcode",
37 "Geolocation",
38 "Duration",
39 "Icon",
40 "Phone",
41 "Autocomplete",
42 "JSON",
43 )
44
45 float_like_fields = {"Float", "Currency", "Percent"}
46 datetime_fields = {"Datetime", "Date", "Time"}
47
48 attachment_fieldtypes = (
49 "Attach",
50 "Attach Image",
51 )
52
53 no_value_fields = (
54 "Section Break",
55 "Column Break",
56 "Tab Break",
57 "HTML",
58 "Table",
59 "Table MultiSelect",
60 "Button",
61 "Image",
62 "Fold",
63 "Heading",
64 )
65
66 display_fieldtypes = (
67 "Section Break",
68 "Column Break",
69 "Tab Break",
70 "HTML",
71 "Button",
72 "Image",
73 "Fold",
74 "Heading",
75 )
76
77 numeric_fieldtypes = ("Currency", "Int", "Long Int", "Float", "Percent", "Check")
78
79 data_field_options = ("Email", "Name", "Phone", "URL", "Barcode")
80
81 default_fields = (
82 "doctype",
83 "name",
84 "owner",
85 "creation",
86 "modified",
87 "modified_by",
88 "docstatus",
89 "idx",
90 )
91
92 child_table_fields = ("parent", "parentfield", "parenttype")
93
94 optional_fields = ("_user_tags", "_comments", "_assign", "_liked_by", "_seen")
95
96 table_fields = ("Table", "Table MultiSelect")
97
98 core_doctypes_list = (
99 "DefaultValue",
100 "DocType",
101 "DocField",
102 "DocPerm",
103 "DocType Action",
104 "DocType Link",
105 "User",
106 "Role",
107 "Has Role",
108 "Page",
109 "Module Def",
110 "Print Format",
111 "Report",
112 "Customize Form",
113 "Customize Form Field",
114 "Property Setter",
115 "Custom Field",
116 "Client Script",
117 )
118
119 log_types = (
120 "Version",
121 "Error Log",
122 "Scheduled Job Log",
123 "Event Sync Log",
124 "Event Update Log",
125 "Access Log",
126 "View Log",
127 "Activity Log",
128 "Energy Point Log",
129 "Notification Log",
130 "Email Queue",
131 "DocShare",
132 "Document Follow",
133 "Console Log",
134 )
135
136 std_fields = [
137 {"fieldname": "name", "fieldtype": "Link", "label": _("ID")},
138 {"fieldname": "owner", "fieldtype": "Link", "label": _("Created By"), "options": "User"},
139 {"fieldname": "idx", "fieldtype": "Int", "label": _("Index")},
140 {"fieldname": "creation", "fieldtype": "Datetime", "label": _("Created On")},
141 {"fieldname": "modified", "fieldtype": "Datetime", "label": _("Last Updated On")},
142 {
143 "fieldname": "modified_by",
144 "fieldtype": "Link",
145 "label": _("Last Updated By"),
146 "options": "User",
147 },
148 {"fieldname": "_user_tags", "fieldtype": "Data", "label": _("Tags")},
149 {"fieldname": "_liked_by", "fieldtype": "Data", "label": _("Liked By")},
150 {"fieldname": "_comments", "fieldtype": "Text", "label": _("Comments")},
151 {"fieldname": "_assign", "fieldtype": "Text", "label": _("Assigned To")},
152 {"fieldname": "docstatus", "fieldtype": "Int", "label": _("Document Status")},
153 ]
154
155
156 def delete_fields(args_dict, delete=0):
157 """
158 Delete a field.
159 * Deletes record from `tabDocField`
160 * If not single doctype: Drops column from table
161 * If single, deletes record from `tabSingles`
162 args_dict = { dt: [field names] }
163 """
164 import frappe.utils
165
166 for dt in args_dict:
167 fields = args_dict[dt]
168 if not fields:
169 continue
170
171 frappe.db.delete(
172 "DocField",
173 {
174 "parent": dt,
175 "fieldname": ("in", fields),
176 },
177 )
178
179 # Delete the data/column only if delete is specified
180 if not delete:
181 continue
182
183 if frappe.db.get_value("DocType", dt, "issingle"):
184 frappe.db.delete(
185 "Singles",
186 {
187 "doctype": dt,
188 "field": ("in", fields),
189 },
190 )
191 else:
192 existing_fields = frappe.db.describe(dt)
193 existing_fields = existing_fields and [e[0] for e in existing_fields] or []
194 fields_need_to_delete = set(fields) & set(existing_fields)
195 if not fields_need_to_delete:
196 continue
197
198 if frappe.db.db_type == "mariadb":
199 # mariadb implicitly commits before DDL, make it explicit
200 frappe.db.commit()
201
202 query = "ALTER TABLE `tab%s` " % dt + ", ".join(
203 "DROP COLUMN `%s`" % f for f in fields_need_to_delete
204 )
205 frappe.db.sql(query)
206
207 if frappe.db.db_type == "postgres":
208 # commit the results to db
209 frappe.db.commit()
210
211
212 def get_permitted_fields(
213 doctype: str,
214 parenttype: str | None = None,
215 user: str | None = None,
216 permission_type: str | None = None,
217 *,
218 ignore_virtual=False,
219 ) -> list[str]:
220 meta = frappe.get_meta(doctype)
221 valid_columns = meta.get_valid_columns()
222
223 if doctype in core_doctypes_list:
224 return valid_columns
225
226 # DocType has only fields of type Table (Table, Table MultiSelect)
227 if set(valid_columns).issubset(default_fields):
228 return valid_columns
229
230 if permission_type is None:
231 permission_type = "select" if frappe.only_has_select_perm(doctype, user=user) else "read"
232
233 meta_fields = meta.default_fields.copy()
234 optional_meta_fields = [x for x in optional_fields if x in valid_columns]
235
236 if permitted_fields := meta.get_permitted_fieldnames(
237 parenttype=parenttype,
238 user=user,
239 permission_type=permission_type,
240 with_virtual_fields=not ignore_virtual,
241 ):
242 if permission_type == "select":
243 return permitted_fields
244
245 if meta.istable:
246 meta_fields.extend(child_table_fields)
247
248 return meta_fields + permitted_fields + optional_meta_fields
249
250 return meta_fields + optional_meta_fields
251
252
253 def is_default_field(fieldname: str) -> bool:
254 return fieldname in default_fields
255
[end of frappe/model/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/frappe/model/__init__.py b/frappe/model/__init__.py
--- a/frappe/model/__init__.py
+++ b/frappe/model/__init__.py
@@ -134,22 +134,22 @@
)
std_fields = [
- {"fieldname": "name", "fieldtype": "Link", "label": _("ID")},
- {"fieldname": "owner", "fieldtype": "Link", "label": _("Created By"), "options": "User"},
- {"fieldname": "idx", "fieldtype": "Int", "label": _("Index")},
- {"fieldname": "creation", "fieldtype": "Datetime", "label": _("Created On")},
- {"fieldname": "modified", "fieldtype": "Datetime", "label": _("Last Updated On")},
+ {"fieldname": "name", "fieldtype": "Link", "label": "ID"},
+ {"fieldname": "owner", "fieldtype": "Link", "label": "Created By", "options": "User"},
+ {"fieldname": "idx", "fieldtype": "Int", "label": "Index"},
+ {"fieldname": "creation", "fieldtype": "Datetime", "label": "Created On"},
+ {"fieldname": "modified", "fieldtype": "Datetime", "label": "Last Updated On"},
{
"fieldname": "modified_by",
"fieldtype": "Link",
- "label": _("Last Updated By"),
+ "label": "Last Updated By",
"options": "User",
},
- {"fieldname": "_user_tags", "fieldtype": "Data", "label": _("Tags")},
- {"fieldname": "_liked_by", "fieldtype": "Data", "label": _("Liked By")},
- {"fieldname": "_comments", "fieldtype": "Text", "label": _("Comments")},
- {"fieldname": "_assign", "fieldtype": "Text", "label": _("Assigned To")},
- {"fieldname": "docstatus", "fieldtype": "Int", "label": _("Document Status")},
+ {"fieldname": "_user_tags", "fieldtype": "Data", "label": "Tags"},
+ {"fieldname": "_liked_by", "fieldtype": "Data", "label": "Liked By"},
+ {"fieldname": "_comments", "fieldtype": "Text", "label": "Comments"},
+ {"fieldname": "_assign", "fieldtype": "Text", "label": "Assigned To"},
+ {"fieldname": "docstatus", "fieldtype": "Int", "label": "Document Status"},
]
|
{"golden_diff": "diff --git a/frappe/model/__init__.py b/frappe/model/__init__.py\n--- a/frappe/model/__init__.py\n+++ b/frappe/model/__init__.py\n@@ -134,22 +134,22 @@\n )\n \n std_fields = [\n-\t{\"fieldname\": \"name\", \"fieldtype\": \"Link\", \"label\": _(\"ID\")},\n-\t{\"fieldname\": \"owner\", \"fieldtype\": \"Link\", \"label\": _(\"Created By\"), \"options\": \"User\"},\n-\t{\"fieldname\": \"idx\", \"fieldtype\": \"Int\", \"label\": _(\"Index\")},\n-\t{\"fieldname\": \"creation\", \"fieldtype\": \"Datetime\", \"label\": _(\"Created On\")},\n-\t{\"fieldname\": \"modified\", \"fieldtype\": \"Datetime\", \"label\": _(\"Last Updated On\")},\n+\t{\"fieldname\": \"name\", \"fieldtype\": \"Link\", \"label\": \"ID\"},\n+\t{\"fieldname\": \"owner\", \"fieldtype\": \"Link\", \"label\": \"Created By\", \"options\": \"User\"},\n+\t{\"fieldname\": \"idx\", \"fieldtype\": \"Int\", \"label\": \"Index\"},\n+\t{\"fieldname\": \"creation\", \"fieldtype\": \"Datetime\", \"label\": \"Created On\"},\n+\t{\"fieldname\": \"modified\", \"fieldtype\": \"Datetime\", \"label\": \"Last Updated On\"},\n \t{\n \t\t\"fieldname\": \"modified_by\",\n \t\t\"fieldtype\": \"Link\",\n-\t\t\"label\": _(\"Last Updated By\"),\n+\t\t\"label\": \"Last Updated By\",\n \t\t\"options\": \"User\",\n \t},\n-\t{\"fieldname\": \"_user_tags\", \"fieldtype\": \"Data\", \"label\": _(\"Tags\")},\n-\t{\"fieldname\": \"_liked_by\", \"fieldtype\": \"Data\", \"label\": _(\"Liked By\")},\n-\t{\"fieldname\": \"_comments\", \"fieldtype\": \"Text\", \"label\": _(\"Comments\")},\n-\t{\"fieldname\": \"_assign\", \"fieldtype\": \"Text\", \"label\": _(\"Assigned To\")},\n-\t{\"fieldname\": \"docstatus\", \"fieldtype\": \"Int\", \"label\": _(\"Document Status\")},\n+\t{\"fieldname\": \"_user_tags\", \"fieldtype\": \"Data\", \"label\": \"Tags\"},\n+\t{\"fieldname\": \"_liked_by\", \"fieldtype\": \"Data\", \"label\": \"Liked By\"},\n+\t{\"fieldname\": \"_comments\", \"fieldtype\": \"Text\", \"label\": \"Comments\"},\n+\t{\"fieldname\": \"_assign\", \"fieldtype\": \"Text\", \"label\": \"Assigned To\"},\n+\t{\"fieldname\": \"docstatus\", \"fieldtype\": \"Int\", \"label\": \"Document Status\"},\n ]\n", "issue": "ImportError in bench get-untranslated: Circular Import Issue\nI encountered an ImportError while running the '**bench get-untranslated**' command in Frappe. This issue seems to be related to a circular import in the '**frappe.translate**' module. When I execute '**bench get-untranslated RU RU_UNTRANSLATED.TXT**', the system throws an ImportError, stating that it cannot import '**get_all_translations**' from the partially initialized module '**frappe.translate**'. This error points towards a potential circular import problem.\r\n\r\n### Steps to Reproduce:\r\n\r\n1. Run in Frappe Bench environment:\r\n\r\n> bench get-untranslated RU RU_UNTRANSLATED.TXT\r\n\r\n2. Observe the ImportError related to 'get_all_translations'.\r\n\r\nI suspect this might be due to a cyclical dependency within the Frappe's translate module. Any insights or fixes would be greatly appreciated.\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. 
See LICENSE\n\n# model __init__.py\nimport frappe\nfrom frappe import _\n\ndata_fieldtypes = (\n\t\"Currency\",\n\t\"Int\",\n\t\"Long Int\",\n\t\"Float\",\n\t\"Percent\",\n\t\"Check\",\n\t\"Small Text\",\n\t\"Long Text\",\n\t\"Code\",\n\t\"Text Editor\",\n\t\"Markdown Editor\",\n\t\"HTML Editor\",\n\t\"Date\",\n\t\"Datetime\",\n\t\"Time\",\n\t\"Text\",\n\t\"Data\",\n\t\"Link\",\n\t\"Dynamic Link\",\n\t\"Password\",\n\t\"Select\",\n\t\"Rating\",\n\t\"Read Only\",\n\t\"Attach\",\n\t\"Attach Image\",\n\t\"Signature\",\n\t\"Color\",\n\t\"Barcode\",\n\t\"Geolocation\",\n\t\"Duration\",\n\t\"Icon\",\n\t\"Phone\",\n\t\"Autocomplete\",\n\t\"JSON\",\n)\n\nfloat_like_fields = {\"Float\", \"Currency\", \"Percent\"}\ndatetime_fields = {\"Datetime\", \"Date\", \"Time\"}\n\nattachment_fieldtypes = (\n\t\"Attach\",\n\t\"Attach Image\",\n)\n\nno_value_fields = (\n\t\"Section Break\",\n\t\"Column Break\",\n\t\"Tab Break\",\n\t\"HTML\",\n\t\"Table\",\n\t\"Table MultiSelect\",\n\t\"Button\",\n\t\"Image\",\n\t\"Fold\",\n\t\"Heading\",\n)\n\ndisplay_fieldtypes = (\n\t\"Section Break\",\n\t\"Column Break\",\n\t\"Tab Break\",\n\t\"HTML\",\n\t\"Button\",\n\t\"Image\",\n\t\"Fold\",\n\t\"Heading\",\n)\n\nnumeric_fieldtypes = (\"Currency\", \"Int\", \"Long Int\", \"Float\", \"Percent\", \"Check\")\n\ndata_field_options = (\"Email\", \"Name\", \"Phone\", \"URL\", \"Barcode\")\n\ndefault_fields = (\n\t\"doctype\",\n\t\"name\",\n\t\"owner\",\n\t\"creation\",\n\t\"modified\",\n\t\"modified_by\",\n\t\"docstatus\",\n\t\"idx\",\n)\n\nchild_table_fields = (\"parent\", \"parentfield\", \"parenttype\")\n\noptional_fields = (\"_user_tags\", \"_comments\", \"_assign\", \"_liked_by\", \"_seen\")\n\ntable_fields = (\"Table\", \"Table MultiSelect\")\n\ncore_doctypes_list = (\n\t\"DefaultValue\",\n\t\"DocType\",\n\t\"DocField\",\n\t\"DocPerm\",\n\t\"DocType Action\",\n\t\"DocType Link\",\n\t\"User\",\n\t\"Role\",\n\t\"Has Role\",\n\t\"Page\",\n\t\"Module Def\",\n\t\"Print Format\",\n\t\"Report\",\n\t\"Customize Form\",\n\t\"Customize Form Field\",\n\t\"Property Setter\",\n\t\"Custom Field\",\n\t\"Client Script\",\n)\n\nlog_types = (\n\t\"Version\",\n\t\"Error Log\",\n\t\"Scheduled Job Log\",\n\t\"Event Sync Log\",\n\t\"Event Update Log\",\n\t\"Access Log\",\n\t\"View Log\",\n\t\"Activity Log\",\n\t\"Energy Point Log\",\n\t\"Notification Log\",\n\t\"Email Queue\",\n\t\"DocShare\",\n\t\"Document Follow\",\n\t\"Console Log\",\n)\n\nstd_fields = [\n\t{\"fieldname\": \"name\", \"fieldtype\": \"Link\", \"label\": _(\"ID\")},\n\t{\"fieldname\": \"owner\", \"fieldtype\": \"Link\", \"label\": _(\"Created By\"), \"options\": \"User\"},\n\t{\"fieldname\": \"idx\", \"fieldtype\": \"Int\", \"label\": _(\"Index\")},\n\t{\"fieldname\": \"creation\", \"fieldtype\": \"Datetime\", \"label\": _(\"Created On\")},\n\t{\"fieldname\": \"modified\", \"fieldtype\": \"Datetime\", \"label\": _(\"Last Updated On\")},\n\t{\n\t\t\"fieldname\": \"modified_by\",\n\t\t\"fieldtype\": \"Link\",\n\t\t\"label\": _(\"Last Updated By\"),\n\t\t\"options\": \"User\",\n\t},\n\t{\"fieldname\": \"_user_tags\", \"fieldtype\": \"Data\", \"label\": _(\"Tags\")},\n\t{\"fieldname\": \"_liked_by\", \"fieldtype\": \"Data\", \"label\": _(\"Liked By\")},\n\t{\"fieldname\": \"_comments\", \"fieldtype\": \"Text\", \"label\": _(\"Comments\")},\n\t{\"fieldname\": \"_assign\", \"fieldtype\": \"Text\", \"label\": _(\"Assigned To\")},\n\t{\"fieldname\": \"docstatus\", \"fieldtype\": \"Int\", \"label\": _(\"Document Status\")},\n]\n\n\ndef delete_fields(args_dict, 
delete=0):\n\t\"\"\"\n\tDelete a field.\n\t* Deletes record from `tabDocField`\n\t* If not single doctype: Drops column from table\n\t* If single, deletes record from `tabSingles`\n\targs_dict = { dt: [field names] }\n\t\"\"\"\n\timport frappe.utils\n\n\tfor dt in args_dict:\n\t\tfields = args_dict[dt]\n\t\tif not fields:\n\t\t\tcontinue\n\n\t\tfrappe.db.delete(\n\t\t\t\"DocField\",\n\t\t\t{\n\t\t\t\t\"parent\": dt,\n\t\t\t\t\"fieldname\": (\"in\", fields),\n\t\t\t},\n\t\t)\n\n\t\t# Delete the data/column only if delete is specified\n\t\tif not delete:\n\t\t\tcontinue\n\n\t\tif frappe.db.get_value(\"DocType\", dt, \"issingle\"):\n\t\t\tfrappe.db.delete(\n\t\t\t\t\"Singles\",\n\t\t\t\t{\n\t\t\t\t\t\"doctype\": dt,\n\t\t\t\t\t\"field\": (\"in\", fields),\n\t\t\t\t},\n\t\t\t)\n\t\telse:\n\t\t\texisting_fields = frappe.db.describe(dt)\n\t\t\texisting_fields = existing_fields and [e[0] for e in existing_fields] or []\n\t\t\tfields_need_to_delete = set(fields) & set(existing_fields)\n\t\t\tif not fields_need_to_delete:\n\t\t\t\tcontinue\n\n\t\t\tif frappe.db.db_type == \"mariadb\":\n\t\t\t\t# mariadb implicitly commits before DDL, make it explicit\n\t\t\t\tfrappe.db.commit()\n\n\t\t\tquery = \"ALTER TABLE `tab%s` \" % dt + \", \".join(\n\t\t\t\t\"DROP COLUMN `%s`\" % f for f in fields_need_to_delete\n\t\t\t)\n\t\t\tfrappe.db.sql(query)\n\n\t\tif frappe.db.db_type == \"postgres\":\n\t\t\t# commit the results to db\n\t\t\tfrappe.db.commit()\n\n\ndef get_permitted_fields(\n\tdoctype: str,\n\tparenttype: str | None = None,\n\tuser: str | None = None,\n\tpermission_type: str | None = None,\n\t*,\n\tignore_virtual=False,\n) -> list[str]:\n\tmeta = frappe.get_meta(doctype)\n\tvalid_columns = meta.get_valid_columns()\n\n\tif doctype in core_doctypes_list:\n\t\treturn valid_columns\n\n\t# DocType has only fields of type Table (Table, Table MultiSelect)\n\tif set(valid_columns).issubset(default_fields):\n\t\treturn valid_columns\n\n\tif permission_type is None:\n\t\tpermission_type = \"select\" if frappe.only_has_select_perm(doctype, user=user) else \"read\"\n\n\tmeta_fields = meta.default_fields.copy()\n\toptional_meta_fields = [x for x in optional_fields if x in valid_columns]\n\n\tif permitted_fields := meta.get_permitted_fieldnames(\n\t\tparenttype=parenttype,\n\t\tuser=user,\n\t\tpermission_type=permission_type,\n\t\twith_virtual_fields=not ignore_virtual,\n\t):\n\t\tif permission_type == \"select\":\n\t\t\treturn permitted_fields\n\n\t\tif meta.istable:\n\t\t\tmeta_fields.extend(child_table_fields)\n\n\t\treturn meta_fields + permitted_fields + optional_meta_fields\n\n\treturn meta_fields + optional_meta_fields\n\n\ndef is_default_field(fieldname: str) -> bool:\n\treturn fieldname in default_fields\n", "path": "frappe/model/__init__.py"}]}
| 3,095 | 567 |
gh_patches_debug_1284
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-875
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
If DB_PORT is not set, config process sets port to an empty string
I have an existing environment that sets up the datacube connection using this:
```
- DB_HOSTNAME=host.docker.internal
- DB_USERNAME=opendatacube
- DB_PASSWORD=opendatacubepassword
- DB_DATABASE=opendatacube
```
and with the new changes that read config from environment variables in preference to the config file, the port now has to be set explicitly with `DB_PORT=5432`.

The expected behaviour was that if the port is blank, it falls back to the default Postgres port.
https://github.com/opendatacube/datacube-core/blob/8481d907b198a1c8946326b8b70625a9a8523a12/datacube/config.py#L265
</issue>
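The fix shown further down treats `DB_*` variables that are present but empty the same as unset ones, so a blank `DB_PORT` no longer produces `db_port: ''` in the rendered config and the connection falls back to PostgreSQL's default port (5432). A small standalone sketch of that behaviour follows (the example environment values are made up for illustration):

```
import os

DB_KEYS = ('hostname', 'port', 'database', 'username', 'password')

def parse_env_params():
    params = {k: os.environ.get('DB_{}'.format(k.upper()), None)
              for k in DB_KEYS}
    # drop unset *and* empty values, so 'port' is simply omitted
    return {k: v for k, v in params.items() if v is not None and v != ''}

os.environ.update({'DB_HOSTNAME': 'host.docker.internal',
                   'DB_PORT': '',
                   'DB_DATABASE': 'opendatacube'})
print(parse_env_params())
# -> {'hostname': 'host.docker.internal', 'database': 'opendatacube'}
#    (assuming no other DB_* variables are set in the environment)
```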
<code>
[start of datacube/config.py]
1 # coding=utf-8
2 """
3 User configuration.
4 """
5
6 import os
7 from pathlib import Path
8 import configparser
9 from urllib.parse import unquote_plus, urlparse
10 from typing import Optional, Iterable, Union, Any, Tuple, Dict
11
12 PathLike = Union[str, 'os.PathLike[Any]']
13
14
15 ENVIRONMENT_VARNAME = 'DATACUBE_CONFIG_PATH'
16 #: Config locations in order. Properties found in latter locations override
17 #: earlier ones.
18 #:
19 #: - `/etc/datacube.conf`
20 #: - file at `$DATACUBE_CONFIG_PATH` environment variable
21 #: - `~/.datacube.conf`
22 #: - `datacube.conf`
23 DEFAULT_CONF_PATHS = tuple(p for p in ['/etc/datacube.conf',
24 os.environ.get(ENVIRONMENT_VARNAME, ''),
25 str(os.path.expanduser("~/.datacube.conf")),
26 'datacube.conf'] if len(p) > 0)
27
28 DEFAULT_ENV = 'default'
29
30 # Default configuration options.
31 _DEFAULT_CONF = """
32 [DEFAULT]
33 # Blank implies localhost
34 db_hostname:
35 db_database: datacube
36 index_driver: default
37 # If a connection is unused for this length of time, expect it to be invalidated.
38 db_connection_timeout: 60
39
40 [user]
41 # Which environment to use when none is specified explicitly.
42 # note: will fail if default_environment points to non-existent section
43 # default_environment: datacube
44 """
45
46 #: Used in place of None as a default, when None is a valid but not default parameter to a function
47 _UNSET = object()
48
49
50 def read_config(default_text: Optional[str] = None) -> configparser.ConfigParser:
51 config = configparser.ConfigParser()
52 if default_text is not None:
53 config.read_string(default_text)
54 return config
55
56
57 class LocalConfig(object):
58 """
59 System configuration for the user.
60
61 This loads from a set of possible configuration files which define the available environments.
62 An environment contains connection details for a Data Cube Index, which provides access to
63 available data.
64
65 """
66
67 def __init__(self, config: configparser.ConfigParser,
68 files_loaded: Optional[Iterable[str]] = None,
69 env: Optional[str] = None):
70 """
71 Datacube environment resolution precedence is:
72 1. Supplied as a function argument `env`
73 2. DATACUBE_ENVIRONMENT environment variable
74 3. user.default_environment option in the config
75 4. 'default' or 'datacube' whichever is present
76
77 If environment is supplied by any of the first 3 methods is not present
78 in the config, then throw an exception.
79 """
80 self._config = config
81 self.files_loaded = [] if files_loaded is None else list(iter(files_loaded))
82
83 if env is None:
84 env = os.environ.get('DATACUBE_ENVIRONMENT',
85 config.get('user', 'default_environment', fallback=None))
86
87 # If the user specifies a particular env, we either want to use it or Fail
88 if env:
89 if config.has_section(env):
90 self._env = env
91 # All is good
92 return
93 else:
94 raise ValueError('No config section found for environment %r' % (env,))
95 else:
96 # If an env hasn't been specifically selected, we can fall back defaults
97 fallbacks = [DEFAULT_ENV, 'datacube']
98 for fallback_env in fallbacks:
99 if config.has_section(fallback_env):
100 self._env = fallback_env
101 return
102 raise ValueError('No ODC environment, checked configurations for %s' % fallbacks)
103
104 @classmethod
105 def find(cls,
106 paths: Optional[Union[str, Iterable[PathLike]]] = None,
107 env: Optional[str] = None) -> 'LocalConfig':
108 """
109 Find config from environment variables or possible filesystem locations.
110
111 'env' is which environment to use from the config: it corresponds to the name of a
112 config section
113 """
114 config = read_config(_DEFAULT_CONF)
115
116 if paths is None:
117 if env is None:
118 env_opts = parse_env_params()
119 if env_opts:
120 return _cfg_from_env_opts(env_opts, config)
121
122 paths = DEFAULT_CONF_PATHS
123
124 if isinstance(paths, str) or hasattr(paths, '__fspath__'): # Use os.PathLike in 3.6+
125 paths = [str(paths)]
126
127 files_loaded = config.read(str(p) for p in paths if p)
128
129 return LocalConfig(
130 config,
131 files_loaded=files_loaded,
132 env=env,
133 )
134
135 def get(self, item: str, fallback=_UNSET):
136 if fallback == _UNSET:
137 return self._config.get(self._env, item)
138 else:
139 return self._config.get(self._env, item, fallback=fallback)
140
141 def __getitem__(self, item: str):
142 return self.get(item, fallback=None)
143
144 def __str__(self) -> str:
145 return "LocalConfig<loaded_from={}, environment={!r}, config={}>".format(
146 self.files_loaded or 'defaults',
147 self._env,
148 dict(self._config[self._env]),
149 )
150
151 def __repr__(self) -> str:
152 return str(self)
153
154
155 OPTIONS = {'reproject_threads': 4}
156
157
158 #: pylint: disable=invalid-name
159 class set_options(object):
160 """Set global state within a controlled context
161
162 Currently, the only supported options are:
163 * reproject_threads: The number of threads to use when reprojecting
164
165 You can use ``set_options`` either as a context manager::
166
167 with datacube.set_options(reproject_threads=16):
168 ...
169
170 Or to set global options::
171
172 datacube.set_options(reproject_threads=16)
173 """
174
175 def __init__(self, **kwargs):
176 self.old = OPTIONS.copy()
177 OPTIONS.update(kwargs)
178
179 def __enter__(self):
180 return
181
182 def __exit__(self, exc_type, value, traceback):
183 OPTIONS.clear()
184 OPTIONS.update(self.old)
185
186
187 DB_KEYS = ('hostname', 'port', 'database', 'username', 'password')
188
189
190 def parse_connect_url(url: str) -> Dict[str, str]:
191 """ Extract database,hostname,port,username,password from db URL.
192
193 Example: postgresql://username:password@hostname:port/database
194
195 For local password-less db use `postgresql:///<your db>`
196 """
197 def split2(s: str, separator: str) -> Tuple[str, str]:
198 i = s.find(separator)
199 return (s, '') if i < 0 else (s[:i], s[i+1:])
200
201 _, netloc, path, *_ = urlparse(url)
202
203 db = path[1:] if path else ''
204 if '@' in netloc:
205 (user, password), (host, port) = (split2(p, ':') for p in split2(netloc, '@'))
206 else:
207 user, password = '', ''
208 host, port = split2(netloc, ':')
209
210 oo = dict(hostname=host, database=db)
211
212 if port:
213 oo['port'] = port
214 if password:
215 oo['password'] = unquote_plus(password)
216 if user:
217 oo['username'] = user
218 return oo
219
220
221 def parse_env_params() -> Dict[str, str]:
222 """
223 - Extract parameters from DATACUBE_DB_URL if present
224 - Else look for DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE
225 - Return {} otherwise
226 """
227
228 db_url = os.environ.get('DATACUBE_DB_URL', None)
229 if db_url is not None:
230 return parse_connect_url(db_url)
231
232 params = {k: os.environ.get('DB_{}'.format(k.upper()), None)
233 for k in DB_KEYS}
234 return {k: v
235 for k, v in params.items()
236 if v is not None}
237
238
239 def _cfg_from_env_opts(opts: Dict[str, str],
240 base: configparser.ConfigParser) -> LocalConfig:
241 base['default'] = {'db_'+k: v for k, v in opts.items()}
242 return LocalConfig(base, files_loaded=[], env='default')
243
244
245 def render_dc_config(params: Dict[str, Any],
246 section_name: str = 'default') -> str:
247 """ Render output of parse_env_params to a string that can be written to config file.
248 """
249 oo = '[{}]\n'.format(section_name)
250 for k in DB_KEYS:
251 v = params.get(k, None)
252 if v is not None:
253 oo += 'db_{k}: {v}\n'.format(k=k, v=v)
254 return oo
255
256
257 def auto_config() -> str:
258 """
259 Render config to $DATACUBE_CONFIG_PATH or ~/.datacube.conf, but only if doesn't exist.
260
261 option1:
262 DATACUBE_DB_URL postgresql://user:password@host/database
263
264 option2:
265 DB_{HOSTNAME|PORT|USERNAME|PASSWORD|DATABASE}
266
267 option3:
268 default config
269 """
270 cfg_path = os.environ.get('DATACUBE_CONFIG_PATH', None)
271 cfg_path = Path(cfg_path) if cfg_path else Path.home()/'.datacube.conf'
272
273 if cfg_path.exists():
274 return str(cfg_path)
275
276 opts = parse_env_params()
277
278 if len(opts) == 0:
279 opts['hostname'] = ''
280 opts['database'] = 'datacube'
281
282 cfg_text = render_dc_config(opts)
283 with open(str(cfg_path), 'wt') as f:
284 f.write(cfg_text)
285
286 return str(cfg_path)
287
[end of datacube/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/datacube/config.py b/datacube/config.py
--- a/datacube/config.py
+++ b/datacube/config.py
@@ -233,7 +233,7 @@
for k in DB_KEYS}
return {k: v
for k, v in params.items()
- if v is not None}
+ if v is not None and v != ""}
def _cfg_from_env_opts(opts: Dict[str, str],
|
{"golden_diff": "diff --git a/datacube/config.py b/datacube/config.py\n--- a/datacube/config.py\n+++ b/datacube/config.py\n@@ -233,7 +233,7 @@\n for k in DB_KEYS}\n return {k: v\n for k, v in params.items()\n- if v is not None}\n+ if v is not None and v != \"\"}\n \n \n def _cfg_from_env_opts(opts: Dict[str, str],\n", "issue": "If DB_PORT is not set, config process sets port to an empty string\nI have an existing environment that sets up the datacube connection using this:\r\n```\r\n - DB_HOSTNAME=host.docker.internal\r\n - DB_USERNAME=opendatacube\r\n - DB_PASSWORD=opendatacubepassword\r\n - DB_DATABASE=opendatacube\r\n```\r\n\r\nand with the new changes to read config from environment variables over the config file, the port is required to be set with `DB_PORT=5432`.\r\n\r\nExpected behaviour was that if the port is blank it is set to the default for Postgres.\r\n\r\nhttps://github.com/opendatacube/datacube-core/blob/8481d907b198a1c8946326b8b70625a9a8523a12/datacube/config.py#L265\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nUser configuration.\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport configparser\nfrom urllib.parse import unquote_plus, urlparse\nfrom typing import Optional, Iterable, Union, Any, Tuple, Dict\n\nPathLike = Union[str, 'os.PathLike[Any]']\n\n\nENVIRONMENT_VARNAME = 'DATACUBE_CONFIG_PATH'\n#: Config locations in order. Properties found in latter locations override\n#: earlier ones.\n#:\n#: - `/etc/datacube.conf`\n#: - file at `$DATACUBE_CONFIG_PATH` environment variable\n#: - `~/.datacube.conf`\n#: - `datacube.conf`\nDEFAULT_CONF_PATHS = tuple(p for p in ['/etc/datacube.conf',\n os.environ.get(ENVIRONMENT_VARNAME, ''),\n str(os.path.expanduser(\"~/.datacube.conf\")),\n 'datacube.conf'] if len(p) > 0)\n\nDEFAULT_ENV = 'default'\n\n# Default configuration options.\n_DEFAULT_CONF = \"\"\"\n[DEFAULT]\n# Blank implies localhost\ndb_hostname:\ndb_database: datacube\nindex_driver: default\n# If a connection is unused for this length of time, expect it to be invalidated.\ndb_connection_timeout: 60\n\n[user]\n# Which environment to use when none is specified explicitly.\n# note: will fail if default_environment points to non-existent section\n# default_environment: datacube\n\"\"\"\n\n#: Used in place of None as a default, when None is a valid but not default parameter to a function\n_UNSET = object()\n\n\ndef read_config(default_text: Optional[str] = None) -> configparser.ConfigParser:\n config = configparser.ConfigParser()\n if default_text is not None:\n config.read_string(default_text)\n return config\n\n\nclass LocalConfig(object):\n \"\"\"\n System configuration for the user.\n\n This loads from a set of possible configuration files which define the available environments.\n An environment contains connection details for a Data Cube Index, which provides access to\n available data.\n\n \"\"\"\n\n def __init__(self, config: configparser.ConfigParser,\n files_loaded: Optional[Iterable[str]] = None,\n env: Optional[str] = None):\n \"\"\"\n Datacube environment resolution precedence is:\n 1. Supplied as a function argument `env`\n 2. DATACUBE_ENVIRONMENT environment variable\n 3. user.default_environment option in the config\n 4. 
'default' or 'datacube' whichever is present\n\n If environment is supplied by any of the first 3 methods is not present\n in the config, then throw an exception.\n \"\"\"\n self._config = config\n self.files_loaded = [] if files_loaded is None else list(iter(files_loaded))\n\n if env is None:\n env = os.environ.get('DATACUBE_ENVIRONMENT',\n config.get('user', 'default_environment', fallback=None))\n\n # If the user specifies a particular env, we either want to use it or Fail\n if env:\n if config.has_section(env):\n self._env = env\n # All is good\n return\n else:\n raise ValueError('No config section found for environment %r' % (env,))\n else:\n # If an env hasn't been specifically selected, we can fall back defaults\n fallbacks = [DEFAULT_ENV, 'datacube']\n for fallback_env in fallbacks:\n if config.has_section(fallback_env):\n self._env = fallback_env\n return\n raise ValueError('No ODC environment, checked configurations for %s' % fallbacks)\n\n @classmethod\n def find(cls,\n paths: Optional[Union[str, Iterable[PathLike]]] = None,\n env: Optional[str] = None) -> 'LocalConfig':\n \"\"\"\n Find config from environment variables or possible filesystem locations.\n\n 'env' is which environment to use from the config: it corresponds to the name of a\n config section\n \"\"\"\n config = read_config(_DEFAULT_CONF)\n\n if paths is None:\n if env is None:\n env_opts = parse_env_params()\n if env_opts:\n return _cfg_from_env_opts(env_opts, config)\n\n paths = DEFAULT_CONF_PATHS\n\n if isinstance(paths, str) or hasattr(paths, '__fspath__'): # Use os.PathLike in 3.6+\n paths = [str(paths)]\n\n files_loaded = config.read(str(p) for p in paths if p)\n\n return LocalConfig(\n config,\n files_loaded=files_loaded,\n env=env,\n )\n\n def get(self, item: str, fallback=_UNSET):\n if fallback == _UNSET:\n return self._config.get(self._env, item)\n else:\n return self._config.get(self._env, item, fallback=fallback)\n\n def __getitem__(self, item: str):\n return self.get(item, fallback=None)\n\n def __str__(self) -> str:\n return \"LocalConfig<loaded_from={}, environment={!r}, config={}>\".format(\n self.files_loaded or 'defaults',\n self._env,\n dict(self._config[self._env]),\n )\n\n def __repr__(self) -> str:\n return str(self)\n\n\nOPTIONS = {'reproject_threads': 4}\n\n\n#: pylint: disable=invalid-name\nclass set_options(object):\n \"\"\"Set global state within a controlled context\n\n Currently, the only supported options are:\n * reproject_threads: The number of threads to use when reprojecting\n\n You can use ``set_options`` either as a context manager::\n\n with datacube.set_options(reproject_threads=16):\n ...\n\n Or to set global options::\n\n datacube.set_options(reproject_threads=16)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.old = OPTIONS.copy()\n OPTIONS.update(kwargs)\n\n def __enter__(self):\n return\n\n def __exit__(self, exc_type, value, traceback):\n OPTIONS.clear()\n OPTIONS.update(self.old)\n\n\nDB_KEYS = ('hostname', 'port', 'database', 'username', 'password')\n\n\ndef parse_connect_url(url: str) -> Dict[str, str]:\n \"\"\" Extract database,hostname,port,username,password from db URL.\n\n Example: postgresql://username:password@hostname:port/database\n\n For local password-less db use `postgresql:///<your db>`\n \"\"\"\n def split2(s: str, separator: str) -> Tuple[str, str]:\n i = s.find(separator)\n return (s, '') if i < 0 else (s[:i], s[i+1:])\n\n _, netloc, path, *_ = urlparse(url)\n\n db = path[1:] if path else ''\n if '@' in netloc:\n (user, password), (host, port) = 
(split2(p, ':') for p in split2(netloc, '@'))\n else:\n user, password = '', ''\n host, port = split2(netloc, ':')\n\n oo = dict(hostname=host, database=db)\n\n if port:\n oo['port'] = port\n if password:\n oo['password'] = unquote_plus(password)\n if user:\n oo['username'] = user\n return oo\n\n\ndef parse_env_params() -> Dict[str, str]:\n \"\"\"\n - Extract parameters from DATACUBE_DB_URL if present\n - Else look for DB_HOSTNAME, DB_USERNAME, DB_PASSWORD, DB_DATABASE\n - Return {} otherwise\n \"\"\"\n\n db_url = os.environ.get('DATACUBE_DB_URL', None)\n if db_url is not None:\n return parse_connect_url(db_url)\n\n params = {k: os.environ.get('DB_{}'.format(k.upper()), None)\n for k in DB_KEYS}\n return {k: v\n for k, v in params.items()\n if v is not None}\n\n\ndef _cfg_from_env_opts(opts: Dict[str, str],\n base: configparser.ConfigParser) -> LocalConfig:\n base['default'] = {'db_'+k: v for k, v in opts.items()}\n return LocalConfig(base, files_loaded=[], env='default')\n\n\ndef render_dc_config(params: Dict[str, Any],\n section_name: str = 'default') -> str:\n \"\"\" Render output of parse_env_params to a string that can be written to config file.\n \"\"\"\n oo = '[{}]\\n'.format(section_name)\n for k in DB_KEYS:\n v = params.get(k, None)\n if v is not None:\n oo += 'db_{k}: {v}\\n'.format(k=k, v=v)\n return oo\n\n\ndef auto_config() -> str:\n \"\"\"\n Render config to $DATACUBE_CONFIG_PATH or ~/.datacube.conf, but only if doesn't exist.\n\n option1:\n DATACUBE_DB_URL postgresql://user:password@host/database\n\n option2:\n DB_{HOSTNAME|PORT|USERNAME|PASSWORD|DATABASE}\n\n option3:\n default config\n \"\"\"\n cfg_path = os.environ.get('DATACUBE_CONFIG_PATH', None)\n cfg_path = Path(cfg_path) if cfg_path else Path.home()/'.datacube.conf'\n\n if cfg_path.exists():\n return str(cfg_path)\n\n opts = parse_env_params()\n\n if len(opts) == 0:\n opts['hostname'] = ''\n opts['database'] = 'datacube'\n\n cfg_text = render_dc_config(opts)\n with open(str(cfg_path), 'wt') as f:\n f.write(cfg_text)\n\n return str(cfg_path)\n", "path": "datacube/config.py"}]}
| 3,546 | 98 |
gh_patches_debug_12277
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-2899
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make poetry version plugin update the pyproject.toml as well, not just the build
/kind bug
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
Currently the poetry plugin updates the version during the build and install phases, but it does not update the pyproject.toml
This makes it harder to easily decipher the version of the package.
**What did you expect to happen:**
Running poetry build or install should set the version of the package in pyproject.toml
</issue>
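For illustration only, here is a minimal sketch of persisting the resolved version back to pyproject.toml from inside the plugin. It assumes the `poetry` object exposes the parsed TOML document through `poetry.file.read()` / `poetry.file.write()`; the helper name is hypothetical.

```python
def write_version_to_pyproject(poetry, version):
    # Hypothetical helper: `poetry` is the Poetry instance handed to the
    # plugin, `version` is the dynamically resolved version string.
    content = poetry.file.read()                   # parsed pyproject.toml
    content["tool"]["poetry"]["version"] = version
    poetry.file.write(content)                     # rewrite the file on disk
```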
<code>
[start of python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py]
1 import ast
2 import subprocess
3 from pathlib import Path
4 from typing import Any, Dict, Optional
5
6 from cleo.io.io import IO
7 from poetry.plugins.plugin import Plugin
8 from poetry.poetry import Poetry
9 from poetry.core.utils.helpers import module_name
10
11
12 class VersionPlugin(Plugin): # type: ignore
13 def activate(self, poetry: Poetry, io: IO) -> None:
14 poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(
15 "tool", {}
16 ).get("poetry-version-plugin")
17 if poetry_version_config is None:
18 return
19 version_source = poetry_version_config.get("source")
20 version_file_path = poetry_version_config.get("file_path")
21 if not version_source:
22 message = (
23 "<b>poetry-version-plugin</b>: No <b>source</b> configuration found in "
24 "[tool.poetry-version-plugin] in pyproject.toml, not extracting "
25 "dynamic version"
26 )
27 io.write_error_line(message)
28 raise RuntimeError(message)
29
30 # Update version using file
31 if version_source == "file" and version_file_path:
32 with open(version_file_path, "r") as version_file:
33 version = version_file.read().strip()
34 poetry.package._set_version(version)
35 io.write_line(
36 "<b>poetry-version-plugin</b>: New package "
37 "version is updated from given file "
38 f"version is: <b>{version}</b>"
39 )
40
41 if version_source == "init":
42 packages = poetry.local_config.get("packages")
43 if packages:
44 if len(packages) == 1:
45 package_name = packages[0]["include"]
46 else:
47 message = (
48 "<b>poetry-version-plugin</b>: More than one package set, "
49 "cannot extract dynamic version"
50 )
51 io.write_error_line(message)
52 raise RuntimeError(message)
53 else:
54 package_name = module_name(poetry.package.name)
55 init_path = Path(package_name) / "__init__.py"
56 if not init_path.is_file():
57 message = (
58 "<b>poetry-version-plugin</b>: __init__.py file not found at "
59 f"{init_path} cannot extract dynamic version"
60 )
61 io.write_error_line(message)
62 raise RuntimeError(message)
63 else:
64 io.write_line(
65 "<b>poetry-version-plugin</b>: Using __init__.py file at "
66 f"{init_path} for dynamic version"
67 )
68 tree = ast.parse(init_path.read_text())
69 for el in tree.body:
70 if isinstance(el, ast.Assign):
71 if len(el.targets) == 1:
72 target = el.targets[0]
73 if isinstance(target, ast.Name):
74 if target.id == "__version__":
75 value_node = el.value
76 if isinstance(value_node, ast.Constant):
77 version = value_node.value
78 elif isinstance(value_node, ast.Str):
79 version = value_node.s
80 else: # pragma: nocover
81 # This is actually covered by tests, but can't be
82 # reported by Coverage
83 # Ref: https://github.com/nedbat/coveragepy/issues/198
84 continue
85 io.write_line(
86 "<b>poetry-version-plugin</b>: Setting package "
87 "dynamic version to __version__ "
88 f"variable from __init__.py: <b>{version}</b>"
89 )
90 poetry.package._set_version(version)
91 return
92 message = (
93 "<b>poetry-version-plugin</b>: No valid __version__ variable found "
94 "in __init__.py, cannot extract dynamic version"
95 )
96 io.write_error_line(message)
97 raise RuntimeError(message)
98 elif version_source == "git-tag":
99 result = subprocess.run(
100 ["git", "describe", "--exact-match", "--tags", "HEAD"],
101 stdin=subprocess.PIPE,
102 stdout=subprocess.PIPE,
103 universal_newlines=True,
104 )
105 if result.returncode == 0:
106 tag = result.stdout.strip()
107 io.write_line(
108 "<b>poetry-version-plugin</b>: Git tag found, setting "
109 f"dynamic version to: {tag}"
110 )
111 poetry.package._set_version(tag)
112 return
113 else:
114 message = (
115 "<b>poetry-version-plugin</b>: No Git tag found, not "
116 "extracting dynamic version"
117 )
118 io.write_error_line(message)
119 raise RuntimeError(message)
120
[end of python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
@@ -37,6 +37,13 @@
"version is updated from given file "
f"version is: <b>{version}</b>"
)
+ content: dict[str, Any] = poetry.file.read()
+ poetry_content = content["tool"]["poetry"]
+ poetry_content["version"] = version
+ poetry.file.write(content)
+ io.write_line(
+ "<b>poetry-version-plugin</b>: New version updated in toml file"
+ )
if version_source == "init":
packages = poetry.local_config.get("packages")
|
{"golden_diff": "diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n@@ -37,6 +37,13 @@\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n+ content: dict[str, Any] = poetry.file.read()\n+ poetry_content = content[\"tool\"][\"poetry\"]\n+ poetry_content[\"version\"] = version\n+ poetry.file.write(content)\n+ io.write_line(\n+ \"<b>poetry-version-plugin</b>: New version updated in toml file\"\n+ )\n \n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n", "issue": "Make poetry version plugin to update the pyproject.toml as well not just the build\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nCurrently the poetry plugin updates the version during the build, install phases but does not update the pyproject.toml\r\nThis makes it harder to easily decipher the version of the package.\r\n\r\n**What did you expect to happen:**\r\nRunning poetry build or install should set the version of the package in pyproject.toml\r\n\r\n\n", "before_files": [{"content": "import ast\nimport subprocess\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom cleo.io.io import IO\nfrom poetry.plugins.plugin import Plugin\nfrom poetry.poetry import Poetry\nfrom poetry.core.utils.helpers import module_name\n\n\nclass VersionPlugin(Plugin): # type: ignore\n def activate(self, poetry: Poetry, io: IO) -> None:\n poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(\n \"tool\", {}\n ).get(\"poetry-version-plugin\")\n if poetry_version_config is None:\n return\n version_source = poetry_version_config.get(\"source\")\n version_file_path = poetry_version_config.get(\"file_path\")\n if not version_source:\n message = (\n \"<b>poetry-version-plugin</b>: No <b>source</b> configuration found in \"\n \"[tool.poetry-version-plugin] in pyproject.toml, not extracting \"\n \"dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n\n # Update version using file\n if version_source == \"file\" and version_file_path:\n with open(version_file_path, \"r\") as version_file:\n version = version_file.read().strip()\n poetry.package._set_version(version)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New package \"\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n\n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n if packages:\n if len(packages) == 1:\n package_name = packages[0][\"include\"]\n else:\n message = (\n \"<b>poetry-version-plugin</b>: More than one package set, \"\n \"cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n package_name = module_name(poetry.package.name)\n init_path = Path(package_name) / \"__init__.py\"\n if not init_path.is_file():\n message = (\n \"<b>poetry-version-plugin</b>: __init__.py file not found at \"\n f\"{init_path} cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n io.write_line(\n \"<b>poetry-version-plugin</b>: Using __init__.py file at \"\n f\"{init_path} for dynamic version\"\n )\n tree = ast.parse(init_path.read_text())\n for el in tree.body:\n if isinstance(el, 
ast.Assign):\n if len(el.targets) == 1:\n target = el.targets[0]\n if isinstance(target, ast.Name):\n if target.id == \"__version__\":\n value_node = el.value\n if isinstance(value_node, ast.Constant):\n version = value_node.value\n elif isinstance(value_node, ast.Str):\n version = value_node.s\n else: # pragma: nocover\n # This is actually covered by tests, but can't be\n # reported by Coverage\n # Ref: https://github.com/nedbat/coveragepy/issues/198\n continue\n io.write_line(\n \"<b>poetry-version-plugin</b>: Setting package \"\n \"dynamic version to __version__ \"\n f\"variable from __init__.py: <b>{version}</b>\"\n )\n poetry.package._set_version(version)\n return\n message = (\n \"<b>poetry-version-plugin</b>: No valid __version__ variable found \"\n \"in __init__.py, cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n elif version_source == \"git-tag\":\n result = subprocess.run(\n [\"git\", \"describe\", \"--exact-match\", \"--tags\", \"HEAD\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if result.returncode == 0:\n tag = result.stdout.strip()\n io.write_line(\n \"<b>poetry-version-plugin</b>: Git tag found, setting \"\n f\"dynamic version to: {tag}\"\n )\n poetry.package._set_version(tag)\n return\n else:\n message = (\n \"<b>poetry-version-plugin</b>: No Git tag found, not \"\n \"extracting dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n", "path": "python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py"}]}
| 1,858 | 196 |
gh_patches_debug_13121
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-1281
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
errors.py: Add support for JSON content that has `errors` instead of `detail` or `details`
Thanks for stopping by to let us know something could be better!
**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.
**Is your feature request related to a problem? Please describe.**
We have been using [errors.py](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py) to raise HttpError errors. Unfortunately, many of our instances have content that matches the following format:
```
{
error: {
errors: {
}
}
}
```
versus what [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) is looking for i.e. only one of `detail`, `details`, or `message`
This results us seeing the errors being logged but not the details:
```
<HttpError 503 when requesting url returned "Internal error. Please try again.">
```
**Describe the solution you'd like**
The solution involves modifying [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) to also support `error_detail_keyword` with an additional value of `errors`. This modifies the order to: `detail`, `details`, `errors`, `message`
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
</issue>
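As a hedged sketch of the requested change, the lookup could simply include `errors` in the search order. The helper below is hypothetical and stands in for part of `_get_reason`; `data` is the decoded JSON error response.

```python
def _pick_error_details(data):
    # Return the most specific error payload, now also checking "errors".
    error = data.get("error", {})
    keyword = next(
        (kw for kw in ("detail", "details", "errors", "message")
         if kw in error),
        "")
    return error[keyword] if keyword else ""
```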
<code>
[start of googleapiclient/errors.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Errors for the library.
16
17 All exceptions defined by the library
18 should be defined in this file.
19 """
20 from __future__ import absolute_import
21
22 __author__ = "[email protected] (Joe Gregorio)"
23
24 import json
25
26 from googleapiclient import _helpers as util
27
28
29 class Error(Exception):
30 """Base error for this module."""
31
32 pass
33
34
35 class HttpError(Error):
36 """HTTP data was invalid or unexpected."""
37
38 @util.positional(3)
39 def __init__(self, resp, content, uri=None):
40 self.resp = resp
41 if not isinstance(content, bytes):
42 raise TypeError("HTTP content should be bytes")
43 self.content = content
44 self.uri = uri
45 self.error_details = ""
46 self._get_reason()
47
48 @property
49 def status_code(self):
50 """Return the HTTP status code from the response content."""
51 return self.resp.status
52
53 def _get_reason(self):
54 """Calculate the reason for the error from the response content."""
55 reason = self.resp.reason
56 try:
57 try:
58 data = json.loads(self.content.decode("utf-8"))
59 except json.JSONDecodeError:
60 # In case it is not json
61 data = self.content.decode("utf-8")
62 if isinstance(data, dict):
63 reason = data["error"]["message"]
64 error_detail_keyword = next((kw for kw in ["detail", "details", "message"] if kw in data["error"]), "")
65 if error_detail_keyword:
66 self.error_details = data["error"][error_detail_keyword]
67 elif isinstance(data, list) and len(data) > 0:
68 first_error = data[0]
69 reason = first_error["error"]["message"]
70 if "details" in first_error["error"]:
71 self.error_details = first_error["error"]["details"]
72 else:
73 self.error_details = data
74 except (ValueError, KeyError, TypeError):
75 pass
76 if reason is None:
77 reason = ""
78 return reason
79
80 def __repr__(self):
81 reason = self._get_reason()
82 if self.error_details:
83 return '<HttpError %s when requesting %s returned "%s". Details: "%s">' % (
84 self.resp.status,
85 self.uri,
86 reason.strip(),
87 self.error_details,
88 )
89 elif self.uri:
90 return '<HttpError %s when requesting %s returned "%s">' % (
91 self.resp.status,
92 self.uri,
93 self._get_reason().strip(),
94 )
95 else:
96 return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
97
98 __str__ = __repr__
99
100
101 class InvalidJsonError(Error):
102 """The JSON returned could not be parsed."""
103
104 pass
105
106
107 class UnknownFileType(Error):
108 """File type unknown or unexpected."""
109
110 pass
111
112
113 class UnknownLinkType(Error):
114 """Link type unknown or unexpected."""
115
116 pass
117
118
119 class UnknownApiNameOrVersion(Error):
120 """No API with that name and version exists."""
121
122 pass
123
124
125 class UnacceptableMimeTypeError(Error):
126 """That is an unacceptable mimetype for this operation."""
127
128 pass
129
130
131 class MediaUploadSizeError(Error):
132 """Media is larger than the method can accept."""
133
134 pass
135
136
137 class ResumableUploadError(HttpError):
138 """Error occurred during resumable upload."""
139
140 pass
141
142
143 class InvalidChunkSizeError(Error):
144 """The given chunksize is not valid."""
145
146 pass
147
148
149 class InvalidNotificationError(Error):
150 """The channel Notification is invalid."""
151
152 pass
153
154
155 class BatchError(HttpError):
156 """Error occurred during batch operations."""
157
158 @util.positional(2)
159 def __init__(self, reason, resp=None, content=None):
160 self.resp = resp
161 self.content = content
162 self.reason = reason
163
164 def __repr__(self):
165 if getattr(self.resp, "status", None) is None:
166 return '<BatchError "%s">' % (self.reason)
167 else:
168 return '<BatchError %s "%s">' % (self.resp.status, self.reason)
169
170 __str__ = __repr__
171
172
173 class UnexpectedMethodError(Error):
174 """Exception raised by RequestMockBuilder on unexpected calls."""
175
176 @util.positional(1)
177 def __init__(self, methodId=None):
178 """Constructor for an UnexpectedMethodError."""
179 super(UnexpectedMethodError, self).__init__(
180 "Received unexpected call %s" % methodId
181 )
182
183
184 class UnexpectedBodyError(Error):
185 """Exception raised by RequestMockBuilder on unexpected bodies."""
186
187 def __init__(self, expected, provided):
188 """Constructor for an UnexpectedMethodError."""
189 super(UnexpectedBodyError, self).__init__(
190 "Expected: [%s] - Provided: [%s]" % (expected, provided)
191 )
192
[end of googleapiclient/errors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py
--- a/googleapiclient/errors.py
+++ b/googleapiclient/errors.py
@@ -61,7 +61,7 @@
data = self.content.decode("utf-8")
if isinstance(data, dict):
reason = data["error"]["message"]
- error_detail_keyword = next((kw for kw in ["detail", "details", "message"] if kw in data["error"]), "")
+ error_detail_keyword = next((kw for kw in ["detail", "details", "errors", "message"] if kw in data["error"]), "")
if error_detail_keyword:
self.error_details = data["error"][error_detail_keyword]
elif isinstance(data, list) and len(data) > 0:
|
{"golden_diff": "diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py\n--- a/googleapiclient/errors.py\n+++ b/googleapiclient/errors.py\n@@ -61,7 +61,7 @@\n data = self.content.decode(\"utf-8\")\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n- error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"message\"] if kw in data[\"error\"]), \"\")\n+ error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"errors\", \"message\"] if kw in data[\"error\"]), \"\")\n if error_detail_keyword:\n self.error_details = data[\"error\"][error_detail_keyword]\n elif isinstance(data, list) and len(data) > 0:\n", "issue": "errors.py: Add support for JSON content that has `errors` instead of `detail` or `details`\nThanks for stopping by to let us know something could be better!\r\n\r\n**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.\r\n\r\n **Is your feature request related to a problem? Please describe.**\r\n\r\nWe have been using [errors.py](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py) to raise HttpError errors. Unfortunately, many of our instances have content that matches the following format:\r\n```\r\n{\r\n error: {\r\n errors: {\r\n }\r\n }\r\n}\r\n```\r\nversus what [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) is looking for i.e. only one of `detail`, `details`, or `message`\r\nThis results us seeing the errors being logged but not the details:\r\n\r\n```\r\n<HttpError 503 when requesting url returned \"Internal error. Please try again.\">\r\n```\r\n\r\n **Describe the solution you'd like**\r\n\r\nThe solution involves modifying [_get_reason](https://github.com/googleapis/google-api-python-client/blob/master/googleapiclient/errors.py#L64) to also support `error_detail_keyword` with an additional value of `errors`. This modifies the order to: `detail`, `details`, `errors`, `message`\r\n\r\n **Describe alternatives you've considered**\r\nA clear and concise description of any alternative solutions or features you've considered.\r\n **Additional context**\r\nAdd any other context or screenshots about the feature request here.\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Errors for the library.\n\nAll exceptions defined by the library\nshould be defined in this file.\n\"\"\"\nfrom __future__ import absolute_import\n\n__author__ = \"[email protected] (Joe Gregorio)\"\n\nimport json\n\nfrom googleapiclient import _helpers as util\n\n\nclass Error(Exception):\n \"\"\"Base error for this module.\"\"\"\n\n pass\n\n\nclass HttpError(Error):\n \"\"\"HTTP data was invalid or unexpected.\"\"\"\n\n @util.positional(3)\n def __init__(self, resp, content, uri=None):\n self.resp = resp\n if not isinstance(content, bytes):\n raise TypeError(\"HTTP content should be bytes\")\n self.content = content\n self.uri = uri\n self.error_details = \"\"\n self._get_reason()\n\n @property\n def status_code(self):\n \"\"\"Return the HTTP status code from the response content.\"\"\"\n return self.resp.status\n\n def _get_reason(self):\n \"\"\"Calculate the reason for the error from the response content.\"\"\"\n reason = self.resp.reason\n try:\n try:\n data = json.loads(self.content.decode(\"utf-8\"))\n except json.JSONDecodeError:\n # In case it is not json\n data = self.content.decode(\"utf-8\")\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"message\"] if kw in data[\"error\"]), \"\")\n if error_detail_keyword:\n self.error_details = data[\"error\"][error_detail_keyword]\n elif isinstance(data, list) and len(data) > 0:\n first_error = data[0]\n reason = first_error[\"error\"][\"message\"]\n if \"details\" in first_error[\"error\"]:\n self.error_details = first_error[\"error\"][\"details\"]\n else:\n self.error_details = data\n except (ValueError, KeyError, TypeError):\n pass\n if reason is None:\n reason = \"\"\n return reason\n\n def __repr__(self):\n reason = self._get_reason()\n if self.error_details:\n return '<HttpError %s when requesting %s returned \"%s\". 
Details: \"%s\">' % (\n self.resp.status,\n self.uri,\n reason.strip(),\n self.error_details,\n )\n elif self.uri:\n return '<HttpError %s when requesting %s returned \"%s\">' % (\n self.resp.status,\n self.uri,\n self._get_reason().strip(),\n )\n else:\n return '<HttpError %s \"%s\">' % (self.resp.status, self._get_reason())\n\n __str__ = __repr__\n\n\nclass InvalidJsonError(Error):\n \"\"\"The JSON returned could not be parsed.\"\"\"\n\n pass\n\n\nclass UnknownFileType(Error):\n \"\"\"File type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownLinkType(Error):\n \"\"\"Link type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownApiNameOrVersion(Error):\n \"\"\"No API with that name and version exists.\"\"\"\n\n pass\n\n\nclass UnacceptableMimeTypeError(Error):\n \"\"\"That is an unacceptable mimetype for this operation.\"\"\"\n\n pass\n\n\nclass MediaUploadSizeError(Error):\n \"\"\"Media is larger than the method can accept.\"\"\"\n\n pass\n\n\nclass ResumableUploadError(HttpError):\n \"\"\"Error occurred during resumable upload.\"\"\"\n\n pass\n\n\nclass InvalidChunkSizeError(Error):\n \"\"\"The given chunksize is not valid.\"\"\"\n\n pass\n\n\nclass InvalidNotificationError(Error):\n \"\"\"The channel Notification is invalid.\"\"\"\n\n pass\n\n\nclass BatchError(HttpError):\n \"\"\"Error occurred during batch operations.\"\"\"\n\n @util.positional(2)\n def __init__(self, reason, resp=None, content=None):\n self.resp = resp\n self.content = content\n self.reason = reason\n\n def __repr__(self):\n if getattr(self.resp, \"status\", None) is None:\n return '<BatchError \"%s\">' % (self.reason)\n else:\n return '<BatchError %s \"%s\">' % (self.resp.status, self.reason)\n\n __str__ = __repr__\n\n\nclass UnexpectedMethodError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected calls.\"\"\"\n\n @util.positional(1)\n def __init__(self, methodId=None):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedMethodError, self).__init__(\n \"Received unexpected call %s\" % methodId\n )\n\n\nclass UnexpectedBodyError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected bodies.\"\"\"\n\n def __init__(self, expected, provided):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedBodyError, self).__init__(\n \"Expected: [%s] - Provided: [%s]\" % (expected, provided)\n )\n", "path": "googleapiclient/errors.py"}]}
| 2,538 | 170 |
gh_patches_debug_3921
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5427
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
embed/animated fails with "did not find model"
`Error rendering Bokeh items Error: Did not find model ID_HERE in session`.
</issue>
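A likely cause, assumed here rather than stated in the report, is that the figure is never registered with the document that gets pushed to the server. A minimal sketch of the workaround:

```python
from bokeh.client import push_session
from bokeh.plotting import curdoc, figure

p = figure(x_range=(-11, 11), y_range=(-11, 11))
curdoc().add_root(p)               # register the model with the document
session = push_session(curdoc())   # the server session now knows the model ID
```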
<code>
[start of examples/embed/animated.py]
1 """ To view this example, first start a Bokeh server:
2
3 bokeh serve --allow-websocket-origin=localhost:8000
4
5 And then load the example into the Bokeh server by
6 running the script:
7
8 python animated.py
9
10 in this directory. Finally, start a simple web server
11 by running:
12
13 python -m SimpleHTTPServer (python 2)
14
15 or
16
17 python -m http.server (python 3)
18
19 in this directory. Navigate to
20
21 http://localhost:8000/animated.html
22
23 """
24 from __future__ import print_function
25
26 from numpy import pi, cos, sin, linspace, roll
27
28 from bokeh.client import push_session
29 from bokeh.embed import autoload_server
30 from bokeh.plotting import figure, curdoc
31
32 M = 5
33 N = M*10 + 1
34 r_base = 8
35 theta = linspace(0, 2*pi, N)
36 r_x = linspace(0, 6*pi, N-1)
37 rmin = r_base - cos(r_x) - 1
38 rmax = r_base + sin(r_x) + 1
39
40 colors = ["FFFFCC", "#C7E9B4", "#7FCDBB", "#41B6C4", "#2C7FB8",
41 "#253494", "#2C7FB8", "#41B6C4", "#7FCDBB", "#C7E9B4"] * 5
42
43 # figure() function auto-adds the figure to curdoc()
44 p = figure(x_range=(-11, 11), y_range=(-11, 11))
45 r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],
46 fill_color=colors, line_color="white")
47
48 # open a session which will keep our local doc in sync with server
49 session = push_session(curdoc())
50
51 html = """
52 <html>
53 <head></head>
54 <body>
55 %s
56 </body>
57 </html>
58 """ % autoload_server(p, session_id=session.id)
59
60 with open("animated.html", "w+") as f:
61 f.write(html)
62
63 print(__doc__)
64
65 ds = r.data_source
66
67 def update():
68 rmin = roll(ds.data["inner_radius"], 1)
69 rmax = roll(ds.data["outer_radius"], -1)
70 ds.data.update(inner_radius=rmin, outer_radius=rmax)
71
72 curdoc().add_periodic_callback(update, 30)
73
74 session.loop_until_closed() # run forever
75
[end of examples/embed/animated.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/embed/animated.py b/examples/embed/animated.py
--- a/examples/embed/animated.py
+++ b/examples/embed/animated.py
@@ -45,6 +45,9 @@
r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],
fill_color=colors, line_color="white")
+# add the plot to curdoc
+curdoc().add_root(p)
+
# open a session which will keep our local doc in sync with server
session = push_session(curdoc())
|
{"golden_diff": "diff --git a/examples/embed/animated.py b/examples/embed/animated.py\n--- a/examples/embed/animated.py\n+++ b/examples/embed/animated.py\n@@ -45,6 +45,9 @@\n r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n \n+# add the plot to curdoc\n+curdoc().add_root(p)\n+\n # open a session which will keep our local doc in sync with server\n session = push_session(curdoc())\n", "issue": "embed/animated fails with \"did not find model\"\n`Error rendering Bokeh items Error: Did not find model ID_HERE in session`.\n\n", "before_files": [{"content": "\"\"\" To view this example, first start a Bokeh server:\n\n bokeh serve --allow-websocket-origin=localhost:8000\n\nAnd then load the example into the Bokeh server by\nrunning the script:\n\n python animated.py\n\nin this directory. Finally, start a simple web server\nby running:\n\n python -m SimpleHTTPServer (python 2)\n\nor\n\n python -m http.server (python 3)\n\nin this directory. Navigate to\n\n http://localhost:8000/animated.html\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom numpy import pi, cos, sin, linspace, roll\n\nfrom bokeh.client import push_session\nfrom bokeh.embed import autoload_server\nfrom bokeh.plotting import figure, curdoc\n\nM = 5\nN = M*10 + 1\nr_base = 8\ntheta = linspace(0, 2*pi, N)\nr_x = linspace(0, 6*pi, N-1)\nrmin = r_base - cos(r_x) - 1\nrmax = r_base + sin(r_x) + 1\n\ncolors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n# figure() function auto-adds the figure to curdoc()\np = figure(x_range=(-11, 11), y_range=(-11, 11))\nr = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n\n# open a session which will keep our local doc in sync with server\nsession = push_session(curdoc())\n\nhtml = \"\"\"\n<html>\n <head></head>\n <body>\n %s\n </body>\n</html>\n\"\"\" % autoload_server(p, session_id=session.id)\n\nwith open(\"animated.html\", \"w+\") as f:\n f.write(html)\n\nprint(__doc__)\n\nds = r.data_source\n\ndef update():\n rmin = roll(ds.data[\"inner_radius\"], 1)\n rmax = roll(ds.data[\"outer_radius\"], -1)\n ds.data.update(inner_radius=rmin, outer_radius=rmax)\n\ncurdoc().add_periodic_callback(update, 30)\n\nsession.loop_until_closed() # run forever\n", "path": "examples/embed/animated.py"}]}
| 1,273 | 124 |
gh_patches_debug_18219
|
rasdani/github-patches
|
git_diff
|
google-deepmind__optax-111
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement log(cosh()) loss function
log-cosh is a doubly differentiable alternative to the huber loss. A naive implementation is prone to overflow (since cosh has an e^x term), so I think it'd be a useful addition to the library. Plus, it's implemented in other libraries, such as [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/keras/losses/log_cosh).
If this sounds like a relevant addition, I'd be happy to contribute it!
</issue>
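For reference, a numerically stable sketch (not a final API proposal) that sidesteps the overflow by staying in log-space:

```python
import jax.numpy as jnp

def log_cosh(x):
    # log(cosh(x)) = log((exp(x) + exp(-x)) / 2) = logaddexp(x, -x) - log(2)
    # logaddexp never materialises exp(x), so large |x| does not overflow.
    return jnp.logaddexp(x, -x) - jnp.log(2.0)
```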
<code>
[start of optax/_src/loss.py]
1 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Standard losses used in optimisation.
16
17 We provide implementations of the most canonical losses used in deep
18 learning. These operate transparently on batches, and do not perform any
19 reduction over the batch dimensions, leaving it to the user to, for instance,
20 mean or sum losses across batch dimensions.
21 """
22
23 from typing import Optional
24
25 import chex
26 import jax
27 import jax.numpy as jnp
28
29 from optax._src import utils
30
31
32 def l2_loss(
33 predictions: chex.Array,
34 targets: Optional[chex.Array] = None,
35 ) -> chex.Array:
36 """Calculates the L2 loss for a set of predictions.
37
38 Note: the 0.5 term is standard in "Pattern Recognition and Machine Learning"
39 by Bishop, but not "The Elements of Statistical Learning" by Tibshirani.
40
41 References:
42 [Chris Bishop, 2006](https://bit.ly/3eeP0ga)
43
44 Args:
45 predictions: a vector of arbitrary shape.
46 targets: a vector of shape compatible with predictions; if not provides
47 then it is assumed to be zero.
48
49 Returns:
50 the squared error loss.
51 """
52 chex.assert_type([predictions], float)
53 errors = (predictions - targets) if (targets is not None) else predictions
54 return 0.5 * (errors)**2
55
56
57 def huber_loss(
58 predictions: chex.Array,
59 targets: Optional[chex.Array] = None,
60 delta: float = 1.) -> chex.Array:
61 """Huber loss, similar to L2 loss close to zero, L1 loss away from zero.
62
63 If gradient descent is applied to the `huber loss`, it is equivalent to
64 clipping gradients of an `l2_loss` to `[-delta, delta]` in the backward pass.
65
66 References:
67 [Huber, 1964](www.projecteuclid.org/download/pdf_1/euclid.aoms/1177703732)
68
69 Args:
70 predictions: a vector of arbitrary shape.
71 targets: a vector of shape compatible with predictions; if not provides
72 then it is assumed to be zero.
73 delta: the bounds for the huber loss transformation, defaults at 1.
74
75 Returns:
76 a vector of same shape of `x`.
77 """
78 chex.assert_type([predictions], float)
79 errors = (predictions - targets) if (targets is not None) else predictions
80 # 0.5 * err^2 if |err| <= d
81 # 0.5 * d^2 + d * (|err| - d) if |err| > d
82 abs_errors = jnp.abs(errors)
83 quadratic = jnp.minimum(abs_errors, delta)
84 # Same as max(abs_x - delta, 0) but avoids potentially doubling gradient.
85 linear = abs_errors - quadratic
86 return 0.5 * quadratic ** 2 + delta * linear
87
88
89 def smooth_labels(
90 labels: chex.Array,
91 alpha: float,
92 ) -> jnp.ndarray:
93 """Apply label smoothing.
94
95 Label smoothing is often used in combination with a cross-entropy loss.
96 Smoothed labels favour small logit gaps, and it has been shown that this can
97 provide better model calibration by preventing overconfident predictions.
98
99 References:
100 [Müller et al, 2019](https://arxiv.org/pdf/1906.02629.pdf)
101
102 Args:
103 labels: one hot labels to be smoothed.
104 alpha: the smoothing factor, the greedy category with be assigned
105 probability `(1-alpha) + alpha / num_categories`
106
107 Returns:
108 a smoothed version of the one hot input labels.
109
110 """
111 chex.assert_type([labels], float)
112 num_categories = labels.shape[-1]
113 return (1.0 - alpha) * labels + alpha / num_categories
114
115
116 def sigmoid_binary_cross_entropy(logits, labels):
117 """Computes sigmoid cross entropy given logits and multiple class labels.
118
119 Measures the probability error in discrete classification tasks in which
120 each class is an independent binary prediction and different classes are
121 not mutually exclusive. This may be used for multilabel image classification
122 for instance a model may predict that an image contains both a cat and a dog.
123
124 References:
125 [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)
126
127 Args:
128 logits: unnormalized log probabilities.
129 labels: the probability for that class.
130
131 Returns:
132 a sigmoid cross entropy loss.
133 """
134 chex.assert_equal_shape([logits, labels])
135 chex.assert_type([logits, labels], float)
136 log_p = jax.nn.log_sigmoid(logits)
137 # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter more numerically stable
138 log_not_p = jax.nn.log_sigmoid(-logits)
139 return -labels * log_p - (1. - labels) * log_not_p
140
141
142 def softmax_cross_entropy(
143 logits: chex.Array,
144 labels: chex.Array,
145 ) -> chex.Array:
146 """Computes the softmax cross entropy between sets of logits and labels.
147
148 Measures the probability error in discrete classification tasks in which
149 the classes are mutually exclusive (each entry is in exactly one class).
150 For example, each CIFAR-10 image is labeled with one and only one label:
151 an image can be a dog or a truck, but not both.
152
153 References:
154 [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)
155
156 Args:
157 logits: unnormalized log probabilities.
158 labels: a valid probability distribution (non-negative, sum to 1), e.g a
159 one hot encoding of which class is the correct one for each input.
160
161 Returns:
162 the cross entropy loss.
163 """
164 chex.assert_equal_shape([logits, labels])
165 chex.assert_type([logits, labels], float)
166 return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1)
167
168
169 def cosine_similarity(
170 predictions: chex.Array,
171 targets: chex.Array,
172 epsilon: float = 0.,
173 ) -> chex.Array:
174 r"""Computes the cosine similarity between targets and predictions.
175
176 The cosine **similarity** is a measure of similarity between vectors defined
177 as the cosine of the angle between them, which is also the inner product of
178 those vectors normalized to have unit norm.
179
180 References:
181 [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)
182
183 Args:
184 predictions: The predicted vector.
185 targets: Ground truth target vector.
186 epsilon: minimum norm for terms in the denominator of the cosine similarity.
187
188 Returns:
189 cosine similarity values.
190 """
191 chex.assert_equal_shape([targets, predictions])
192 chex.assert_type([targets, predictions], float)
193 # vectorize norm fn, to treat all dimensions except the last as batch dims.
194 batched_norm_fn = jnp.vectorize(
195 utils.safe_norm, signature='(k)->()', excluded={1})
196 # normalise the last dimension of targets and predictions.
197 unit_targets = targets / jnp.expand_dims(
198 batched_norm_fn(targets, epsilon), axis=-1)
199 unit_predictions = predictions / jnp.expand_dims(
200 batched_norm_fn(predictions, epsilon), axis=-1)
201 # return cosine similarity.
202 return jnp.sum(unit_targets * unit_predictions, axis=-1)
203
204
205 def cosine_distance(
206 predictions: chex.Array,
207 targets: chex.Array,
208 epsilon: float = 0.,
209 ) -> chex.Array:
210 r"""Computes the cosine distance between targets and predictions.
211
212 The cosine **distance**, implemented here, measures the **dissimilarity**
213 of two vectors as the opposite of cosine **similarity**: `1 - cos(\theta)`.
214
215 References:
216 [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)
217
218 Args:
219 predictions: The predicted vector.
220 targets: Ground truth target vector.
221 epsilon: minimum norm for terms in the denominator of the cosine similarity.
222
223 Returns:
224 cosine similarity values.
225 """
226 chex.assert_equal_shape([targets, predictions])
227 chex.assert_type([targets, predictions], float)
228 # cosine distance = 1 - cosine similarity.
229 return 1. - cosine_similarity(predictions, targets, epsilon)
230
[end of optax/_src/loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optax/_src/loss.py b/optax/_src/loss.py
--- a/optax/_src/loss.py
+++ b/optax/_src/loss.py
@@ -227,3 +227,29 @@
chex.assert_type([targets, predictions], float)
# cosine distance = 1 - cosine similarity.
return 1. - cosine_similarity(predictions, targets, epsilon)
+
+
+def log_cosh(
+ predictions: chex.Array,
+ targets: Optional[chex.Array] = None,
+) -> chex.Array:
+ """Calculates the log-cosh loss for a set of predictions.
+
+ log(cosh(x)) is approximately `(x**2) / 2` for small x and `abs(x) - log(2)`
+ for large x. It is a twice differentiable alternative to the Huber loss.
+
+ References:
+ [Chen et al, 2019](https://openreview.net/pdf?id=rkglvsC9Ym)
+
+ Args:
+ predictions: a vector of arbitrary shape.
+ targets: a vector of shape compatible with predictions; if not provided
+ then it is assumed to be zero.
+
+ Returns:
+ the log-cosh loss.
+ """
+ chex.assert_type([predictions], float)
+ errors = (predictions - targets) if (targets is not None) else predictions
+ # log(cosh(x)) = log((exp(x) + exp(-x))/2) = log(exp(x) + exp(-x)) - log(2)
+ return jnp.logaddexp(errors, -errors) - jnp.log(2.0).astype(errors.dtype)
|
{"golden_diff": "diff --git a/optax/_src/loss.py b/optax/_src/loss.py\n--- a/optax/_src/loss.py\n+++ b/optax/_src/loss.py\n@@ -227,3 +227,29 @@\n chex.assert_type([targets, predictions], float)\n # cosine distance = 1 - cosine similarity.\n return 1. - cosine_similarity(predictions, targets, epsilon)\n+\n+\n+def log_cosh(\n+ predictions: chex.Array,\n+ targets: Optional[chex.Array] = None,\n+) -> chex.Array:\n+ \"\"\"Calculates the log-cosh loss for a set of predictions.\n+\n+ log(cosh(x)) is approximately `(x**2) / 2` for small x and `abs(x) - log(2)`\n+ for large x. It is a twice differentiable alternative to the Huber loss.\n+\n+ References:\n+ [Chen et al, 2019](https://openreview.net/pdf?id=rkglvsC9Ym)\n+\n+ Args:\n+ predictions: a vector of arbitrary shape.\n+ targets: a vector of shape compatible with predictions; if not provided\n+ then it is assumed to be zero.\n+\n+ Returns:\n+ the log-cosh loss.\n+ \"\"\"\n+ chex.assert_type([predictions], float)\n+ errors = (predictions - targets) if (targets is not None) else predictions\n+ # log(cosh(x)) = log((exp(x) + exp(-x))/2) = log(exp(x) + exp(-x)) - log(2)\n+ return jnp.logaddexp(errors, -errors) - jnp.log(2.0).astype(errors.dtype)\n", "issue": "Implement log(cosh()) loss function\nlog-cosh is a doubly differentiable alternative to the huber loss. A naive implementation is prone to overflow (since cosh has an e^x term), so I think it'd be a useful addition to the library. Plus, it's implemented in other libraries, such as [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/keras/losses/log_cosh).\r\n\r\nIf this sounds like a relevant addition, I'd be happy to contribute it!\n", "before_files": [{"content": "# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Standard losses used in optimisation.\n\nWe provide implementations of the most canonical losses used in deep\nlearning. 
These operate transparently on batches, and do not perform any\nreduction over the batch dimensions, leaving it to the user to, for instance,\nmean or sum losses across batch dimensions.\n\"\"\"\n\nfrom typing import Optional\n\nimport chex\nimport jax\nimport jax.numpy as jnp\n\nfrom optax._src import utils\n\n\ndef l2_loss(\n predictions: chex.Array,\n targets: Optional[chex.Array] = None,\n) -> chex.Array:\n \"\"\"Calculates the L2 loss for a set of predictions.\n\n Note: the 0.5 term is standard in \"Pattern Recognition and Machine Learning\"\n by Bishop, but not \"The Elements of Statistical Learning\" by Tibshirani.\n\n References:\n [Chris Bishop, 2006](https://bit.ly/3eeP0ga)\n\n Args:\n predictions: a vector of arbitrary shape.\n targets: a vector of shape compatible with predictions; if not provides\n then it is assumed to be zero.\n\n Returns:\n the squared error loss.\n \"\"\"\n chex.assert_type([predictions], float)\n errors = (predictions - targets) if (targets is not None) else predictions\n return 0.5 * (errors)**2\n\n\ndef huber_loss(\n predictions: chex.Array,\n targets: Optional[chex.Array] = None,\n delta: float = 1.) -> chex.Array:\n \"\"\"Huber loss, similar to L2 loss close to zero, L1 loss away from zero.\n\n If gradient descent is applied to the `huber loss`, it is equivalent to\n clipping gradients of an `l2_loss` to `[-delta, delta]` in the backward pass.\n\n References:\n [Huber, 1964](www.projecteuclid.org/download/pdf_1/euclid.aoms/1177703732)\n\n Args:\n predictions: a vector of arbitrary shape.\n targets: a vector of shape compatible with predictions; if not provides\n then it is assumed to be zero.\n delta: the bounds for the huber loss transformation, defaults at 1.\n\n Returns:\n a vector of same shape of `x`.\n \"\"\"\n chex.assert_type([predictions], float)\n errors = (predictions - targets) if (targets is not None) else predictions\n # 0.5 * err^2 if |err| <= d\n # 0.5 * d^2 + d * (|err| - d) if |err| > d\n abs_errors = jnp.abs(errors)\n quadratic = jnp.minimum(abs_errors, delta)\n # Same as max(abs_x - delta, 0) but avoids potentially doubling gradient.\n linear = abs_errors - quadratic\n return 0.5 * quadratic ** 2 + delta * linear\n\n\ndef smooth_labels(\n labels: chex.Array,\n alpha: float,\n) -> jnp.ndarray:\n \"\"\"Apply label smoothing.\n\n Label smoothing is often used in combination with a cross-entropy loss.\n Smoothed labels favour small logit gaps, and it has been shown that this can\n provide better model calibration by preventing overconfident predictions.\n\n References:\n [M\u00fcller et al, 2019](https://arxiv.org/pdf/1906.02629.pdf)\n\n Args:\n labels: one hot labels to be smoothed.\n alpha: the smoothing factor, the greedy category with be assigned\n probability `(1-alpha) + alpha / num_categories`\n\n Returns:\n a smoothed version of the one hot input labels.\n\n \"\"\"\n chex.assert_type([labels], float)\n num_categories = labels.shape[-1]\n return (1.0 - alpha) * labels + alpha / num_categories\n\n\ndef sigmoid_binary_cross_entropy(logits, labels):\n \"\"\"Computes sigmoid cross entropy given logits and multiple class labels.\n\n Measures the probability error in discrete classification tasks in which\n each class is an independent binary prediction and different classes are\n not mutually exclusive. 
This may be used for multilabel image classification\n for instance a model may predict that an image contains both a cat and a dog.\n\n References:\n [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)\n\n Args:\n logits: unnormalized log probabilities.\n labels: the probability for that class.\n\n Returns:\n a sigmoid cross entropy loss.\n \"\"\"\n chex.assert_equal_shape([logits, labels])\n chex.assert_type([logits, labels], float)\n log_p = jax.nn.log_sigmoid(logits)\n # log(1 - sigmoid(x)) = log_sigmoid(-x), the latter more numerically stable\n log_not_p = jax.nn.log_sigmoid(-logits)\n return -labels * log_p - (1. - labels) * log_not_p\n\n\ndef softmax_cross_entropy(\n logits: chex.Array,\n labels: chex.Array,\n) -> chex.Array:\n \"\"\"Computes the softmax cross entropy between sets of logits and labels.\n\n Measures the probability error in discrete classification tasks in which\n the classes are mutually exclusive (each entry is in exactly one class).\n For example, each CIFAR-10 image is labeled with one and only one label:\n an image can be a dog or a truck, but not both.\n\n References:\n [Goodfellow et al, 2016](http://www.deeplearningbook.org/contents/prob.html)\n\n Args:\n logits: unnormalized log probabilities.\n labels: a valid probability distribution (non-negative, sum to 1), e.g a\n one hot encoding of which class is the correct one for each input.\n\n Returns:\n the cross entropy loss.\n \"\"\"\n chex.assert_equal_shape([logits, labels])\n chex.assert_type([logits, labels], float)\n return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1)\n\n\ndef cosine_similarity(\n predictions: chex.Array,\n targets: chex.Array,\n epsilon: float = 0.,\n) -> chex.Array:\n r\"\"\"Computes the cosine similarity between targets and predictions.\n\n The cosine **similarity** is a measure of similarity between vectors defined\n as the cosine of the angle between them, which is also the inner product of\n those vectors normalized to have unit norm.\n\n References:\n [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)\n\n Args:\n predictions: The predicted vector.\n targets: Ground truth target vector.\n epsilon: minimum norm for terms in the denominator of the cosine similarity.\n\n Returns:\n cosine similarity values.\n \"\"\"\n chex.assert_equal_shape([targets, predictions])\n chex.assert_type([targets, predictions], float)\n # vectorize norm fn, to treat all dimensions except the last as batch dims.\n batched_norm_fn = jnp.vectorize(\n utils.safe_norm, signature='(k)->()', excluded={1})\n # normalise the last dimension of targets and predictions.\n unit_targets = targets / jnp.expand_dims(\n batched_norm_fn(targets, epsilon), axis=-1)\n unit_predictions = predictions / jnp.expand_dims(\n batched_norm_fn(predictions, epsilon), axis=-1)\n # return cosine similarity.\n return jnp.sum(unit_targets * unit_predictions, axis=-1)\n\n\ndef cosine_distance(\n predictions: chex.Array,\n targets: chex.Array,\n epsilon: float = 0.,\n) -> chex.Array:\n r\"\"\"Computes the cosine distance between targets and predictions.\n\n The cosine **distance**, implemented here, measures the **dissimilarity**\n of two vectors as the opposite of cosine **similarity**: `1 - cos(\\theta)`.\n\n References:\n [Wikipedia, 2021](https://en.wikipedia.org/wiki/Cosine_similarity)\n\n Args:\n predictions: The predicted vector.\n targets: Ground truth target vector.\n epsilon: minimum norm for terms in the denominator of the cosine similarity.\n\n Returns:\n cosine 
similarity values.\n \"\"\"\n chex.assert_equal_shape([targets, predictions])\n chex.assert_type([targets, predictions], float)\n # cosine distance = 1 - cosine similarity.\n return 1. - cosine_similarity(predictions, targets, epsilon)\n", "path": "optax/_src/loss.py"}]}
| 3,223 | 378 |
gh_patches_debug_40839
|
rasdani/github-patches
|
git_diff
|
googleapis__google-auth-library-python-147
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stop reading Cloud SDK's configuration to obtain the project ID
Instead, we should subprocess out to `gcloud config config-helper --format json`
Example output:
```
{
"configuration": {
"active_configuration": "default",
"properties": {
"core": {
"account": "[ELIDED]",
"disable_usage_reporting": "False",
"project": "[ELIDED]"
}
}
},
"credential": {
"access_token": "[ELIDED]",
"token_expiry": "2017-03-23T23:09:49Z"
},
"sentinels": {
"config_sentinel": "/Users/jonwayne/.config/gcloud/config_sentinel"
}
}
```
Note: we should *not* use the auth token provided here at all. We should keep our existing logic of reading `~/.config/gcloud/application_default_credentials.json` because those credentials are populated exclusively by `gcloud auth application-default login` and intended for Application Default Credentials.
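A rough sketch of how the helper output could be consumed (assuming `gcloud` is on the `PATH`; error handling is deliberately coarse, since any failure just means "no project ID"):

```python
import json
import subprocess

_CLOUD_SDK_CONFIG_COMMAND = (
    'gcloud', 'config', 'config-helper', '--format', 'json')


def get_project_id():
    """Best-effort lookup of the active project ID via the Cloud SDK."""
    try:
        output = subprocess.check_output(
            _CLOUD_SDK_CONFIG_COMMAND, stderr=subprocess.STDOUT)
    except (subprocess.CalledProcessError, OSError, IOError):
        # gcloud is not installed or the command failed.
        return None

    try:
        configuration = json.loads(output.decode('utf-8'))
        return configuration['configuration']['properties']['core']['project']
    except (ValueError, KeyError):
        # Output was not valid JSON or did not contain a project ID.
        return None
```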
</issue>
<code>
[start of google/auth/_cloud_sdk.py]
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Helpers for reading the Google Cloud SDK's configuration."""
16
17 import io
18 import os
19
20 import six
21 from six.moves import configparser
22
23 from google.auth import environment_vars
24 import google.oauth2.credentials
25
26 # The Google OAuth 2.0 token endpoint. Used for authorized user credentials.
27 _GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'
28
29 # The ~/.config subdirectory containing gcloud credentials.
30 _CONFIG_DIRECTORY = 'gcloud'
31 # Windows systems store config at %APPDATA%\gcloud
32 _WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'
33 # The name of the file in the Cloud SDK config that contains default
34 # credentials.
35 _CREDENTIALS_FILENAME = 'application_default_credentials.json'
36 # The config section and key for the project ID in the cloud SDK config.
37 _PROJECT_CONFIG_SECTION = 'core'
38 _PROJECT_CONFIG_KEY = 'project'
39
40
41 def get_config_path():
42 """Returns the absolute path the the Cloud SDK's configuration directory.
43
44 Returns:
45 str: The Cloud SDK config path.
46 """
47 # If the path is explicitly set, return that.
48 try:
49 return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]
50 except KeyError:
51 pass
52
53 # Non-windows systems store this at ~/.config/gcloud
54 if os.name != 'nt':
55 return os.path.join(
56 os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)
57 # Windows systems store config at %APPDATA%\gcloud
58 else:
59 try:
60 return os.path.join(
61 os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],
62 _CONFIG_DIRECTORY)
63 except KeyError:
64 # This should never happen unless someone is really
65 # messing with things, but we'll cover the case anyway.
66 drive = os.environ.get('SystemDrive', 'C:')
67 return os.path.join(
68 drive, '\\', _CONFIG_DIRECTORY)
69
70
71 def get_application_default_credentials_path():
72 """Gets the path to the application default credentials file.
73
74 The path may or may not exist.
75
76 Returns:
77 str: The full path to application default credentials.
78 """
79 config_path = get_config_path()
80 return os.path.join(config_path, _CREDENTIALS_FILENAME)
81
82
83 def _get_active_config(config_path):
84 """Gets the active config for the Cloud SDK.
85
86 Args:
87 config_path (str): The Cloud SDK's config path.
88
89 Returns:
90 str: The active configuration name.
91 """
92 active_config_filename = os.path.join(config_path, 'active_config')
93
94 if not os.path.isfile(active_config_filename):
95 return 'default'
96
97 with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:
98 active_config_name = file_obj.read().strip()
99
100 return active_config_name
101
102
103 def _get_config_file(config_path, config_name):
104 """Returns the full path to a configuration's config file.
105
106 Args:
107 config_path (str): The Cloud SDK's config path.
108 config_name (str): The configuration name.
109
110 Returns:
111 str: The config file path.
112 """
113 return os.path.join(
114 config_path, 'configurations', 'config_{}'.format(config_name))
115
116
117 def get_project_id():
118 """Gets the project ID from the Cloud SDK's configuration.
119
120 Returns:
121 Optional[str]: The project ID.
122 """
123 config_path = get_config_path()
124 active_config = _get_active_config(config_path)
125 config_file = _get_config_file(config_path, active_config)
126
127 if not os.path.isfile(config_file):
128 return None
129
130 config = configparser.RawConfigParser()
131
132 try:
133 config.read(config_file)
134
135 if config.has_section(_PROJECT_CONFIG_SECTION):
136 return config.get(
137 _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)
138
139 except configparser.Error:
140 return None
141
142
143 def load_authorized_user_credentials(info):
144 """Loads an authorized user credential.
145
146 Args:
147 info (Mapping[str, str]): The loaded file's data.
148
149 Returns:
150 google.oauth2.credentials.Credentials: The constructed credentials.
151
152 Raises:
153 ValueError: if the info is in the wrong format or missing data.
154 """
155 keys_needed = set(('refresh_token', 'client_id', 'client_secret'))
156 missing = keys_needed.difference(six.iterkeys(info))
157
158 if missing:
159 raise ValueError(
160 'Authorized user info was not in the expected format, missing '
161 'fields {}.'.format(', '.join(missing)))
162
163 return google.oauth2.credentials.Credentials(
164 None, # No access token, must be refreshed.
165 refresh_token=info['refresh_token'],
166 token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,
167 client_id=info['client_id'],
168 client_secret=info['client_secret'])
169
[end of google/auth/_cloud_sdk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py
--- a/google/auth/_cloud_sdk.py
+++ b/google/auth/_cloud_sdk.py
@@ -14,11 +14,11 @@
"""Helpers for reading the Google Cloud SDK's configuration."""
-import io
+import json
import os
+import subprocess
import six
-from six.moves import configparser
from google.auth import environment_vars
import google.oauth2.credentials
@@ -33,9 +33,9 @@
# The name of the file in the Cloud SDK config that contains default
# credentials.
_CREDENTIALS_FILENAME = 'application_default_credentials.json'
-# The config section and key for the project ID in the cloud SDK config.
-_PROJECT_CONFIG_SECTION = 'core'
-_PROJECT_CONFIG_KEY = 'project'
+# The command to get the Cloud SDK configuration
+_CLOUD_SDK_CONFIG_COMMAND = (
+ 'gcloud', 'config', 'config-helper', '--format', 'json')
def get_config_path():
@@ -80,66 +80,6 @@
return os.path.join(config_path, _CREDENTIALS_FILENAME)
-def _get_active_config(config_path):
- """Gets the active config for the Cloud SDK.
-
- Args:
- config_path (str): The Cloud SDK's config path.
-
- Returns:
- str: The active configuration name.
- """
- active_config_filename = os.path.join(config_path, 'active_config')
-
- if not os.path.isfile(active_config_filename):
- return 'default'
-
- with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:
- active_config_name = file_obj.read().strip()
-
- return active_config_name
-
-
-def _get_config_file(config_path, config_name):
- """Returns the full path to a configuration's config file.
-
- Args:
- config_path (str): The Cloud SDK's config path.
- config_name (str): The configuration name.
-
- Returns:
- str: The config file path.
- """
- return os.path.join(
- config_path, 'configurations', 'config_{}'.format(config_name))
-
-
-def get_project_id():
- """Gets the project ID from the Cloud SDK's configuration.
-
- Returns:
- Optional[str]: The project ID.
- """
- config_path = get_config_path()
- active_config = _get_active_config(config_path)
- config_file = _get_config_file(config_path, active_config)
-
- if not os.path.isfile(config_file):
- return None
-
- config = configparser.RawConfigParser()
-
- try:
- config.read(config_file)
-
- if config.has_section(_PROJECT_CONFIG_SECTION):
- return config.get(
- _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)
-
- except configparser.Error:
- return None
-
-
def load_authorized_user_credentials(info):
"""Loads an authorized user credential.
@@ -166,3 +106,28 @@
token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,
client_id=info['client_id'],
client_secret=info['client_secret'])
+
+
+def get_project_id():
+ """Gets the project ID from the Cloud SDK.
+
+ Returns:
+ Optional[str]: The project ID.
+ """
+
+ try:
+ output = subprocess.check_output(
+ _CLOUD_SDK_CONFIG_COMMAND,
+ stderr=subprocess.STDOUT)
+ except (subprocess.CalledProcessError, OSError, IOError):
+ return None
+
+ try:
+ configuration = json.loads(output.decode('utf-8'))
+ except ValueError:
+ return None
+
+ try:
+ return configuration['configuration']['properties']['core']['project']
+ except KeyError:
+ return None
|
{"golden_diff": "diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py\n--- a/google/auth/_cloud_sdk.py\n+++ b/google/auth/_cloud_sdk.py\n@@ -14,11 +14,11 @@\n \n \"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n \n-import io\n+import json\n import os\n+import subprocess\n \n import six\n-from six.moves import configparser\n \n from google.auth import environment_vars\n import google.oauth2.credentials\n@@ -33,9 +33,9 @@\n # The name of the file in the Cloud SDK config that contains default\n # credentials.\n _CREDENTIALS_FILENAME = 'application_default_credentials.json'\n-# The config section and key for the project ID in the cloud SDK config.\n-_PROJECT_CONFIG_SECTION = 'core'\n-_PROJECT_CONFIG_KEY = 'project'\n+# The command to get the Cloud SDK configuration\n+_CLOUD_SDK_CONFIG_COMMAND = (\n+ 'gcloud', 'config', 'config-helper', '--format', 'json')\n \n \n def get_config_path():\n@@ -80,66 +80,6 @@\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n \n \n-def _get_active_config(config_path):\n- \"\"\"Gets the active config for the Cloud SDK.\n-\n- Args:\n- config_path (str): The Cloud SDK's config path.\n-\n- Returns:\n- str: The active configuration name.\n- \"\"\"\n- active_config_filename = os.path.join(config_path, 'active_config')\n-\n- if not os.path.isfile(active_config_filename):\n- return 'default'\n-\n- with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:\n- active_config_name = file_obj.read().strip()\n-\n- return active_config_name\n-\n-\n-def _get_config_file(config_path, config_name):\n- \"\"\"Returns the full path to a configuration's config file.\n-\n- Args:\n- config_path (str): The Cloud SDK's config path.\n- config_name (str): The configuration name.\n-\n- Returns:\n- str: The config file path.\n- \"\"\"\n- return os.path.join(\n- config_path, 'configurations', 'config_{}'.format(config_name))\n-\n-\n-def get_project_id():\n- \"\"\"Gets the project ID from the Cloud SDK's configuration.\n-\n- Returns:\n- Optional[str]: The project ID.\n- \"\"\"\n- config_path = get_config_path()\n- active_config = _get_active_config(config_path)\n- config_file = _get_config_file(config_path, active_config)\n-\n- if not os.path.isfile(config_file):\n- return None\n-\n- config = configparser.RawConfigParser()\n-\n- try:\n- config.read(config_file)\n-\n- if config.has_section(_PROJECT_CONFIG_SECTION):\n- return config.get(\n- _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)\n-\n- except configparser.Error:\n- return None\n-\n-\n def load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n \n@@ -166,3 +106,28 @@\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n+\n+\n+def get_project_id():\n+ \"\"\"Gets the project ID from the Cloud SDK.\n+\n+ Returns:\n+ Optional[str]: The project ID.\n+ \"\"\"\n+\n+ try:\n+ output = subprocess.check_output(\n+ _CLOUD_SDK_CONFIG_COMMAND,\n+ stderr=subprocess.STDOUT)\n+ except (subprocess.CalledProcessError, OSError, IOError):\n+ return None\n+\n+ try:\n+ configuration = json.loads(output.decode('utf-8'))\n+ except ValueError:\n+ return None\n+\n+ try:\n+ return configuration['configuration']['properties']['core']['project']\n+ except KeyError:\n+ return None\n", "issue": "Stop reading Cloud SDK's configuration to obtain the project ID\nInstead, we should subprocess out to `gcloud config config-helper --format json`\r\n\r\nExample output:\r\n\r\n```\r\n{\r\n \"configuration\": {\r\n 
\"active_configuration\": \"default\",\r\n \"properties\": {\r\n \"core\": {\r\n \"account\": \"[ELIDED]\",\r\n \"disable_usage_reporting\": \"False\",\r\n \"project\": \"[ELIDED]\"\r\n }\r\n }\r\n },\r\n \"credential\": {\r\n \"access_token\": \"[ELIDED]\",\r\n \"token_expiry\": \"2017-03-23T23:09:49Z\"\r\n },\r\n \"sentinels\": {\r\n \"config_sentinel\": \"/Users/jonwayne/.config/gcloud/config_sentinel\"\r\n }\r\n}\r\n```\r\n\r\nNote: we should *not* use the auth token provided here at all. We should keep our existing logic of reading `~/.config/gcloud/application_default_credentials.json` because those credentials are populated exclusively by `gcloud auth application-default login` and intended for Application Default Credentials.\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport io\nimport os\n\nimport six\nfrom six.moves import configparser\n\nfrom google.auth import environment_vars\nimport google.oauth2.credentials\n\n# The Google OAuth 2.0 token endpoint. Used for authorized user credentials.\n_GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = 'gcloud'\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = 'application_default_credentials.json'\n# The config section and key for the project ID in the cloud SDK config.\n_PROJECT_CONFIG_SECTION = 'core'\n_PROJECT_CONFIG_KEY = 'project'\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != 'nt':\n return os.path.join(\n os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],\n _CONFIG_DIRECTORY)\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get('SystemDrive', 'C:')\n return os.path.join(\n drive, '\\\\', _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef _get_active_config(config_path):\n \"\"\"Gets the active config for the Cloud SDK.\n\n Args:\n config_path (str): The Cloud SDK's config 
path.\n\n Returns:\n str: The active configuration name.\n \"\"\"\n active_config_filename = os.path.join(config_path, 'active_config')\n\n if not os.path.isfile(active_config_filename):\n return 'default'\n\n with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:\n active_config_name = file_obj.read().strip()\n\n return active_config_name\n\n\ndef _get_config_file(config_path, config_name):\n \"\"\"Returns the full path to a configuration's config file.\n\n Args:\n config_path (str): The Cloud SDK's config path.\n config_name (str): The configuration name.\n\n Returns:\n str: The config file path.\n \"\"\"\n return os.path.join(\n config_path, 'configurations', 'config_{}'.format(config_name))\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK's configuration.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n config_path = get_config_path()\n active_config = _get_active_config(config_path)\n config_file = _get_config_file(config_path, active_config)\n\n if not os.path.isfile(config_file):\n return None\n\n config = configparser.RawConfigParser()\n\n try:\n config.read(config_file)\n\n if config.has_section(_PROJECT_CONFIG_SECTION):\n return config.get(\n _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)\n\n except configparser.Error:\n return None\n\n\ndef load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n\n Args:\n info (Mapping[str, str]): The loaded file's data.\n\n Returns:\n google.oauth2.credentials.Credentials: The constructed credentials.\n\n Raises:\n ValueError: if the info is in the wrong format or missing data.\n \"\"\"\n keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n missing = keys_needed.difference(six.iterkeys(info))\n\n if missing:\n raise ValueError(\n 'Authorized user info was not in the expected format, missing '\n 'fields {}.'.format(', '.join(missing)))\n\n return google.oauth2.credentials.Credentials(\n None, # No access token, must be refreshed.\n refresh_token=info['refresh_token'],\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n", "path": "google/auth/_cloud_sdk.py"}]}
| 2,310 | 837 |
gh_patches_debug_36535
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-2123
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Predictive class fails for non-scalar sites
I usually use raw poutines for prediction:
```py
vectorize = pyro.plate("_vectorize", num_samples, dim=-2)
trace = poutine.trace(vectorize(guide)).get_trace()
samples = poutine.replay(vectorize(model), trace)()
```
When instead I try the new `Predictive` class, this fails on models with nontrivial plate structure:
```py
predictive = Predictive(model, guide=guide, return_sites=["x", "y"],
num_samples=num_samples, parallel=True)
samples = predictive.get_samples()
```
I believe this is due to reshaping of posterior samples without regard to their event shape. I don't know why `_predictive()` even needs to reshape samples.
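If the reshape is kept, it presumably has to pad singleton plate dims based on each site's `batch_shape` only, leaving the event dims at the right of the value shape untouched. Roughly (a sketch; the helper name is made up and `site_node` stands for a `poutine.trace` node dict):
```py
def expected_site_shape(num_samples, max_plate_nesting, site_node):
    # Pad with singleton dims only for plates the site does not occupy;
    # event dims are part of the value shape and must not be padded over.
    append_ndim = max_plate_nesting - len(site_node["fn"].batch_shape)
    return (num_samples,) + (1,) * append_ndim + site_node["value"].shape
```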
I have added a failing test in this branch. Feel free to take over the branch and create a PR:
https://github.com/pyro-ppl/pyro/pull/new/fix-preditive-shapes
</issue>
<code>
[start of pyro/infer/predictive.py]
1 from functools import reduce
2 import warnings
3
4 import torch
5
6 import pyro
7 import pyro.poutine as poutine
8 from pyro.poutine.util import prune_subsample_sites
9
10
11 def _guess_max_plate_nesting(model, args, kwargs):
12 """
13 Guesses max_plate_nesting by running the model once
14 without enumeration. This optimistically assumes static model
15 structure.
16 """
17 with poutine.block():
18 model_trace = poutine.trace(model).get_trace(*args, **kwargs)
19 sites = [site for site in model_trace.nodes.values()
20 if site["type"] == "sample"]
21
22 dims = [frame.dim
23 for site in sites
24 for frame in site["cond_indep_stack"]
25 if frame.vectorized]
26 max_plate_nesting = -min(dims) if dims else 0
27 return max_plate_nesting
28
29
30 def _predictive_sequential(model, posterior_samples, model_args, model_kwargs,
31 num_samples, sample_sites, return_trace=False):
32 collected = []
33 samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]
34 for i in range(num_samples):
35 trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(*model_args, **model_kwargs)
36 if return_trace:
37 collected.append(trace)
38 else:
39 collected.append({site: trace.nodes[site]['value'] for site in sample_sites})
40
41 return collected if return_trace else {site: torch.stack([s[site] for s in collected])
42 for site in sample_sites}
43
44
45 def _predictive(model, posterior_samples, num_samples, return_sites=None,
46 return_trace=False, parallel=False, model_args=(), model_kwargs={}):
47 max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)
48 vectorize = pyro.plate("_num_predictive_samples", num_samples, dim=-max_plate_nesting-1)
49 model_trace = prune_subsample_sites(poutine.trace(model).get_trace(*model_args, **model_kwargs))
50 reshaped_samples = {}
51
52 for name, sample in posterior_samples.items():
53 sample_shape = sample.shape[1:]
54 sample = sample.reshape((num_samples,) + (1,) * (max_plate_nesting - len(sample_shape)) + sample_shape)
55 reshaped_samples[name] = sample
56
57 if return_trace:
58 trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\
59 .get_trace(*model_args, **model_kwargs)
60 return trace
61
62 return_site_shapes = {}
63 for site in model_trace.stochastic_nodes + model_trace.observation_nodes:
64 site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape
65 if isinstance(return_sites, (list, tuple, set)):
66 if site in return_sites:
67 return_site_shapes[site] = site_shape
68 else:
69 if (return_sites is not None) or (site not in reshaped_samples):
70 return_site_shapes[site] = site_shape
71
72 # handle _RETURN site
73 if isinstance(return_sites, (list, tuple, set)) and '_RETURN' in return_sites:
74 value = model_trace.nodes['_RETURN']['value']
75 shape = (num_samples,) + value.shape if torch.is_tensor(value) else None
76 return_site_shapes['_RETURN'] = shape
77
78 if not parallel:
79 return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,
80 return_site_shapes.keys(), return_trace=False)
81
82 trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\
83 .get_trace(*model_args, **model_kwargs)
84 predictions = {}
85 for site, shape in return_site_shapes.items():
86 value = trace.nodes[site]['value']
87 if site == '_RETURN' and shape is None:
88 predictions[site] = value
89 continue
90 if value.numel() < reduce((lambda x, y: x * y), shape):
91 predictions[site] = value.expand(shape)
92 else:
93 predictions[site] = value.reshape(shape)
94
95 return predictions
96
97
98 class Predictive:
99 """
100 This class is used to construct predictive distribution. The predictive distribution is obtained
101 by running model conditioned on latent samples from `posterior_samples`.
102
103 .. warning::
104 The interface for the :class:`Predictive` class is experimental, and
105 might change in the future.
106
107 :param model: Python callable containing Pyro primitives.
108 :param dict posterior_samples: dictionary of samples from the posterior.
109 :param callable guide: optional guide to get posterior samples of sites not present
110 in `posterior_samples`.
111 :param int num_samples: number of samples to draw from the predictive distribution.
112 This argument has no effect if ``posterior_samples`` is non-empty, in which case,
113 the leading dimension size of samples in ``posterior_samples`` is used.
114 :param return_sites: sites to return; by default only sample sites not present
115 in `posterior_samples` are returned.
116 :type return_sites: list, tuple, or set
117 :param bool parallel: predict in parallel by wrapping the existing model
118 in an outermost `plate` messenger. Note that this requires that the model has
119 all batch dims correctly annotated via :class:`~pyro.plate`. Default is `False`.
120 """
121 def __init__(self, model, posterior_samples=None, guide=None, num_samples=None,
122 return_sites=None, parallel=False):
123 if posterior_samples is None and num_samples is None:
124 raise ValueError("Either posterior_samples or num_samples must be specified.")
125
126 posterior_samples = {} if posterior_samples is None else posterior_samples
127
128 for name, sample in posterior_samples.items():
129 batch_size = sample.shape[0]
130 if num_samples is None:
131 num_samples = batch_size
132 elif num_samples != batch_size:
133 warnings.warn("Sample's leading dimension size {} is different from the "
134 "provided {} num_samples argument. Defaulting to {}."
135 .format(batch_size, num_samples, batch_size), UserWarning)
136 num_samples = batch_size
137
138 if num_samples is None:
139 raise ValueError("No sample sites in posterior samples to infer `num_samples`.")
140
141 if return_sites is not None:
142 assert isinstance(return_sites, (list, tuple, set))
143
144 self.model = model
145 self.posterior_samples = {} if posterior_samples is None else posterior_samples
146 self.num_samples = num_samples
147 self.guide = guide
148 self.return_sites = return_sites
149 self.parallel = parallel
150
151 def get_samples(self, *args, **kwargs):
152 """
153 Returns dict of samples from the predictive distribution. By default, only sample sites not
154 contained in `posterior_samples` are returned. This can be modified by changing the
155 `return_sites` keyword argument of this :class:`Predictive` instance.
156
157 :param args: model arguments.
158 :param kwargs: model keyword arguments.
159 """
160 posterior_samples = self.posterior_samples
161 if self.guide is not None:
162 # use return_sites='' as a special signal to return all sites
163 posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,
164 return_sites='', parallel=self.parallel,
165 model_args=args, model_kwargs=kwargs)
166 return _predictive(self.model, posterior_samples, self.num_samples,
167 return_sites=self.return_sites, parallel=self.parallel,
168 model_args=args, model_kwargs=kwargs)
169
170 def get_vectorized_trace(self, *args, **kwargs):
171 """
172 Returns a single vectorized `trace` from the predictive distribution. Note that this
173 requires that the model has all batch dims correctly annotated via :class:`~pyro.plate`.
174
175 :param args: model arguments.
176 :param kwargs: model keyword arguments.
177 """
178 posterior_samples = self.posterior_samples
179 if self.guide is not None:
180 # use return_sites='' as a special signal to return all sites
181 posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,
182 return_sites='', parallel=self.parallel,
183 model_args=args, model_kwargs=kwargs)
184 return _predictive(self.model, posterior_samples, self.num_samples,
185 return_trace=True, model_args=args, model_kwargs=kwargs)
186
[end of pyro/infer/predictive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyro/infer/predictive.py b/pyro/infer/predictive.py
--- a/pyro/infer/predictive.py
+++ b/pyro/infer/predictive.py
@@ -28,7 +28,7 @@
def _predictive_sequential(model, posterior_samples, model_args, model_kwargs,
- num_samples, sample_sites, return_trace=False):
+ num_samples, return_site_shapes, return_trace=False):
collected = []
samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]
for i in range(num_samples):
@@ -36,10 +36,13 @@
if return_trace:
collected.append(trace)
else:
- collected.append({site: trace.nodes[site]['value'] for site in sample_sites})
+ collected.append({site: trace.nodes[site]['value'] for site in return_site_shapes})
- return collected if return_trace else {site: torch.stack([s[site] for s in collected])
- for site in sample_sites}
+ if return_trace:
+ return collected
+ else:
+ return {site: torch.stack([s[site] for s in collected]).reshape(shape)
+ for site, shape in return_site_shapes.items()}
def _predictive(model, posterior_samples, num_samples, return_sites=None,
@@ -61,7 +64,8 @@
return_site_shapes = {}
for site in model_trace.stochastic_nodes + model_trace.observation_nodes:
- site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape
+ append_ndim = max_plate_nesting - len(model_trace.nodes[site]["fn"].batch_shape)
+ site_shape = (num_samples,) + (1,) * append_ndim + model_trace.nodes[site]['value'].shape
if isinstance(return_sites, (list, tuple, set)):
if site in return_sites:
return_site_shapes[site] = site_shape
@@ -77,7 +81,7 @@
if not parallel:
return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,
- return_site_shapes.keys(), return_trace=False)
+ return_site_shapes, return_trace=False)
trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\
.get_trace(*model_args, **model_kwargs)
|
{"golden_diff": "diff --git a/pyro/infer/predictive.py b/pyro/infer/predictive.py\n--- a/pyro/infer/predictive.py\n+++ b/pyro/infer/predictive.py\n@@ -28,7 +28,7 @@\n \n \n def _predictive_sequential(model, posterior_samples, model_args, model_kwargs,\n- num_samples, sample_sites, return_trace=False):\n+ num_samples, return_site_shapes, return_trace=False):\n collected = []\n samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]\n for i in range(num_samples):\n@@ -36,10 +36,13 @@\n if return_trace:\n collected.append(trace)\n else:\n- collected.append({site: trace.nodes[site]['value'] for site in sample_sites})\n+ collected.append({site: trace.nodes[site]['value'] for site in return_site_shapes})\n \n- return collected if return_trace else {site: torch.stack([s[site] for s in collected])\n- for site in sample_sites}\n+ if return_trace:\n+ return collected\n+ else:\n+ return {site: torch.stack([s[site] for s in collected]).reshape(shape)\n+ for site, shape in return_site_shapes.items()}\n \n \n def _predictive(model, posterior_samples, num_samples, return_sites=None,\n@@ -61,7 +64,8 @@\n \n return_site_shapes = {}\n for site in model_trace.stochastic_nodes + model_trace.observation_nodes:\n- site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape\n+ append_ndim = max_plate_nesting - len(model_trace.nodes[site][\"fn\"].batch_shape)\n+ site_shape = (num_samples,) + (1,) * append_ndim + model_trace.nodes[site]['value'].shape\n if isinstance(return_sites, (list, tuple, set)):\n if site in return_sites:\n return_site_shapes[site] = site_shape\n@@ -77,7 +81,7 @@\n \n if not parallel:\n return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,\n- return_site_shapes.keys(), return_trace=False)\n+ return_site_shapes, return_trace=False)\n \n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n", "issue": "Predictive class fails for non-scalar sites\nI usually use raw poutines to for prediction:\r\n```py\r\nvectorize = pyro.plate(\"_vectorize\", num_samples, dim=-2)\r\ntrace = poutine.trace(vectorize(guide)).get_trace()\r\nsamples = poutine.replay(vectorize(model), trace)()\r\n```\r\nWhen instead I try the new `Predictive` class, this fails on models with nontrivial plate structure:\r\n```py\r\npredictive = Predictive(model, guide=guide, return_sites=[\"x\", \"y\"],\r\n num_samples=num_samples, parallel=True)\r\nsamples = predictive.get_samples()\r\n```\r\nI believe this is due to reshaping of posterior samples without regard to their event shape. I don't know why `_predictive()` even needs to reshape samples.\r\n\r\nI have added a failing test in this branch. Feel free to take over the branch and create a PR:\r\nhttps://github.com/pyro-ppl/pyro/pull/new/fix-preditive-shapes\n", "before_files": [{"content": "from functools import reduce\nimport warnings\n\nimport torch\n\nimport pyro\nimport pyro.poutine as poutine\nfrom pyro.poutine.util import prune_subsample_sites\n\n\ndef _guess_max_plate_nesting(model, args, kwargs):\n \"\"\"\n Guesses max_plate_nesting by running the model once\n without enumeration. 
This optimistically assumes static model\n structure.\n \"\"\"\n with poutine.block():\n model_trace = poutine.trace(model).get_trace(*args, **kwargs)\n sites = [site for site in model_trace.nodes.values()\n if site[\"type\"] == \"sample\"]\n\n dims = [frame.dim\n for site in sites\n for frame in site[\"cond_indep_stack\"]\n if frame.vectorized]\n max_plate_nesting = -min(dims) if dims else 0\n return max_plate_nesting\n\n\ndef _predictive_sequential(model, posterior_samples, model_args, model_kwargs,\n num_samples, sample_sites, return_trace=False):\n collected = []\n samples = [{k: v[i] for k, v in posterior_samples.items()} for i in range(num_samples)]\n for i in range(num_samples):\n trace = poutine.trace(poutine.condition(model, samples[i])).get_trace(*model_args, **model_kwargs)\n if return_trace:\n collected.append(trace)\n else:\n collected.append({site: trace.nodes[site]['value'] for site in sample_sites})\n\n return collected if return_trace else {site: torch.stack([s[site] for s in collected])\n for site in sample_sites}\n\n\ndef _predictive(model, posterior_samples, num_samples, return_sites=None,\n return_trace=False, parallel=False, model_args=(), model_kwargs={}):\n max_plate_nesting = _guess_max_plate_nesting(model, model_args, model_kwargs)\n vectorize = pyro.plate(\"_num_predictive_samples\", num_samples, dim=-max_plate_nesting-1)\n model_trace = prune_subsample_sites(poutine.trace(model).get_trace(*model_args, **model_kwargs))\n reshaped_samples = {}\n\n for name, sample in posterior_samples.items():\n sample_shape = sample.shape[1:]\n sample = sample.reshape((num_samples,) + (1,) * (max_plate_nesting - len(sample_shape)) + sample_shape)\n reshaped_samples[name] = sample\n\n if return_trace:\n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n return trace\n\n return_site_shapes = {}\n for site in model_trace.stochastic_nodes + model_trace.observation_nodes:\n site_shape = (num_samples,) + model_trace.nodes[site]['value'].shape\n if isinstance(return_sites, (list, tuple, set)):\n if site in return_sites:\n return_site_shapes[site] = site_shape\n else:\n if (return_sites is not None) or (site not in reshaped_samples):\n return_site_shapes[site] = site_shape\n\n # handle _RETURN site\n if isinstance(return_sites, (list, tuple, set)) and '_RETURN' in return_sites:\n value = model_trace.nodes['_RETURN']['value']\n shape = (num_samples,) + value.shape if torch.is_tensor(value) else None\n return_site_shapes['_RETURN'] = shape\n\n if not parallel:\n return _predictive_sequential(model, posterior_samples, model_args, model_kwargs, num_samples,\n return_site_shapes.keys(), return_trace=False)\n\n trace = poutine.trace(poutine.condition(vectorize(model), reshaped_samples))\\\n .get_trace(*model_args, **model_kwargs)\n predictions = {}\n for site, shape in return_site_shapes.items():\n value = trace.nodes[site]['value']\n if site == '_RETURN' and shape is None:\n predictions[site] = value\n continue\n if value.numel() < reduce((lambda x, y: x * y), shape):\n predictions[site] = value.expand(shape)\n else:\n predictions[site] = value.reshape(shape)\n\n return predictions\n\n\nclass Predictive:\n \"\"\"\n This class is used to construct predictive distribution. The predictive distribution is obtained\n by running model conditioned on latent samples from `posterior_samples`.\n\n .. 
warning::\n The interface for the :class:`Predictive` class is experimental, and\n might change in the future.\n\n :param model: Python callable containing Pyro primitives.\n :param dict posterior_samples: dictionary of samples from the posterior.\n :param callable guide: optional guide to get posterior samples of sites not present\n in `posterior_samples`.\n :param int num_samples: number of samples to draw from the predictive distribution.\n This argument has no effect if ``posterior_samples`` is non-empty, in which case,\n the leading dimension size of samples in ``posterior_samples`` is used.\n :param return_sites: sites to return; by default only sample sites not present\n in `posterior_samples` are returned.\n :type return_sites: list, tuple, or set\n :param bool parallel: predict in parallel by wrapping the existing model\n in an outermost `plate` messenger. Note that this requires that the model has\n all batch dims correctly annotated via :class:`~pyro.plate`. Default is `False`.\n \"\"\"\n def __init__(self, model, posterior_samples=None, guide=None, num_samples=None,\n return_sites=None, parallel=False):\n if posterior_samples is None and num_samples is None:\n raise ValueError(\"Either posterior_samples or num_samples must be specified.\")\n\n posterior_samples = {} if posterior_samples is None else posterior_samples\n\n for name, sample in posterior_samples.items():\n batch_size = sample.shape[0]\n if num_samples is None:\n num_samples = batch_size\n elif num_samples != batch_size:\n warnings.warn(\"Sample's leading dimension size {} is different from the \"\n \"provided {} num_samples argument. Defaulting to {}.\"\n .format(batch_size, num_samples, batch_size), UserWarning)\n num_samples = batch_size\n\n if num_samples is None:\n raise ValueError(\"No sample sites in posterior samples to infer `num_samples`.\")\n\n if return_sites is not None:\n assert isinstance(return_sites, (list, tuple, set))\n\n self.model = model\n self.posterior_samples = {} if posterior_samples is None else posterior_samples\n self.num_samples = num_samples\n self.guide = guide\n self.return_sites = return_sites\n self.parallel = parallel\n\n def get_samples(self, *args, **kwargs):\n \"\"\"\n Returns dict of samples from the predictive distribution. By default, only sample sites not\n contained in `posterior_samples` are returned. This can be modified by changing the\n `return_sites` keyword argument of this :class:`Predictive` instance.\n\n :param args: model arguments.\n :param kwargs: model keyword arguments.\n \"\"\"\n posterior_samples = self.posterior_samples\n if self.guide is not None:\n # use return_sites='' as a special signal to return all sites\n posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,\n return_sites='', parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n return _predictive(self.model, posterior_samples, self.num_samples,\n return_sites=self.return_sites, parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n\n def get_vectorized_trace(self, *args, **kwargs):\n \"\"\"\n Returns a single vectorized `trace` from the predictive distribution. 
Note that this\n requires that the model has all batch dims correctly annotated via :class:`~pyro.plate`.\n\n :param args: model arguments.\n :param kwargs: model keyword arguments.\n \"\"\"\n posterior_samples = self.posterior_samples\n if self.guide is not None:\n # use return_sites='' as a special signal to return all sites\n posterior_samples = _predictive(self.guide, posterior_samples, self.num_samples,\n return_sites='', parallel=self.parallel,\n model_args=args, model_kwargs=kwargs)\n return _predictive(self.model, posterior_samples, self.num_samples,\n return_trace=True, model_args=args, model_kwargs=kwargs)\n", "path": "pyro/infer/predictive.py"}]}
| 2,975 | 529 |
gh_patches_debug_25173
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-3626
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
az postgres up does not create the resource group
## Describe the bug
**Tutorial link:** [Deploy a Django web app with PostgreSQL in Azure App Service](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone)
It is expected that the `az postgres up` command creates the resource group if it does not already exist, as mentioned in the [doc](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone#3-create-postgres-database-in-azure).
Running this command on Azure CLI version **2.22.1** and `db-up` extension version **0.2.3** works as expected.

However, this does not work with the latest Azure CLI version **2.24.2** and `db-up` extension version **0.2.3**.
**Command Name**
`az postgres up
Extension Name: db-up. Version: 0.2.3.`
**Errors:**
```
(ResourceGroupNotFound) Resource group 'DjangoPostgres-tutorial-rg-2242' could not be found.
```
## To Reproduce:
Steps to reproduce the behavior. Note that argument values have been redacted, as they may contain sensitive information.
- Install Azure CLI version `2.24.2`.
- `az postgres up --resource-group {} --location {} --sku-name {} --server-name {} --database-name {} --admin-user {} --admin-password {} --ssl-enforcement {}`
## Expected Behavior
The command should create the specified resource group and proceed with provisioning the postgres db server, as shown in the screenshot above.
## Environment Summary
```
Windows-10-10.0.19041-SP0
Python 3.8.9
Installer: MSI
azure-cli 2.24.2
azure-cli-ml 1.24.0
Extensions:
azure-cli-ml 1.24.0
azure-devops 0.18.0
azure-iot 0.10.9
db-up 0.2.3
mesh 0.10.6
resource-graph 1.1.0
scheduled-query 0.2.0
spring-cloud 1.1.1
```
## Additional Context
GitHub Issue: https://github.com/MicrosoftDocs/azure-docs/issues/76722
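A plausible culprit, judging from `azext_db_up/_validators.py` (quoted below): the existence probe for the resource group only catches `CloudError`, so if a newer SDK surfaces the not-found case as a different exception type, nothing falls through to the group-creation branch; the exception simply propagates and the command aborts. The exact exception class raised by the newer SDK is not pinned down here, and the stand-in classes below only illustrate the control flow:

```python
class CloudError(Exception):
    """Stand-in for msrestazure's CloudError (the only type the validator catches)."""

class SomethingElse(Exception):
    """Stand-in for whatever a newer SDK raises for a missing resource group."""

def should_create_group(probe):
    create = True
    try:
        probe()            # models resource_client.resource_groups.get(...)
        create = False     # group exists, nothing to create
    except CloudError:     # only this type falls through to creation
        pass
    return create

def missing_group_old_sdk():
    raise CloudError("group not found")

def missing_group_new_sdk():
    raise SomethingElse("group not found")

print(should_create_group(missing_group_old_sdk))   # True, so the group gets created
# should_create_group(missing_group_new_sdk)        # uncaught, so the command aborts early
```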
<!--Please don't remove this:-->
<!--auto-generated-->
</issue>
<code>
[start of src/db-up/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.2.3"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 3',
19 'Programming Language :: Python :: 3.6',
20 'Programming Language :: Python :: 3.7',
21 'Programming Language :: Python :: 3.8',
22 'License :: OSI Approved :: MIT License',
23 ]
24
25 DEPENDENCIES = [
26 'Cython==0.29.17',
27 'mysql-connector-python==8.0.14',
28 'psycopg2-binary==2.8.5'
29 ]
30
31 setup(
32 name='db-up',
33 version=VERSION,
34 description='Additional commands to simplify Azure Database workflows.',
35 long_description='An Azure CLI Extension to provide additional DB commands.',
36 license='MIT',
37 author='Microsoft Corporation',
38 author_email='[email protected]',
39 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/db-up',
40 classifiers=CLASSIFIERS,
41 package_data={'azext_db_up': ['azext_metadata.json', 'random_name/*']},
42 packages=find_packages(exclude=["tests"]),
43 install_requires=DEPENDENCIES
44 )
45
[end of src/db-up/setup.py]
[start of src/db-up/azext_db_up/_validators.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 # pylint: disable=import-error
7 import uuid
8 from six.moves import configparser
9 from azure.cli.core.commands.validators import get_default_location_from_resource_group
10 from azure.mgmt.resource.resources.models import ResourceGroup
11 from knack.log import get_logger
12 from knack.util import CLIError
13 from msrestazure.azure_exceptions import CloudError
14 from msrest.exceptions import ValidationError
15 from azext_db_up._client_factory import resource_client_factory
16 from azext_db_up.random_name.generate import generate_username
17 from azext_db_up.util import create_random_resource_name, get_config_value, set_config_value, remove_config_value
18
19 logger = get_logger(__name__)
20
21 DEFAULT_LOCATION = 'westus2'
22 DEFAULT_DATABASE_NAME = 'sampledb'
23
24
25 def db_up_namespace_processor(db_type):
26 return lambda cmd, namespace: _process_db_up_namespace(cmd, namespace, db_type=db_type)
27
28
29 def db_down_namespace_processor(db_type):
30 return lambda cmd, namespace: _process_db_down_namespace(namespace, db_type=db_type)
31
32
33 # pylint: disable=bare-except
34 def _process_db_up_namespace(cmd, namespace, db_type=None):
35 # populate from cache if existing when no resource group name provided
36 resource_client = resource_client_factory(cmd.cli_ctx)
37 if namespace.resource_group_name is None:
38 _set_value(db_type, namespace, 'resource_group_name', 'group', cache=False)
39 try:
40 resource_client.resource_groups.get(namespace.resource_group_name)
41 except: # Clear resource group name information when it is invalid
42 namespace.resource_group_name = None
43
44 # populate from cache if existing when no location provided
45 if namespace.location is None:
46 _set_value(db_type, namespace, 'location', 'location', cache=False)
47 # generate smart defaults when namespace.location is None
48 if _get_value(db_type, namespace, 'location', 'location') is None:
49 try:
50 get_default_location_from_resource_group(cmd, namespace)
51 except (CLIError, ValidationError):
52 namespace.location = 'eastus'
53 _set_value(db_type, namespace, 'location', 'location', default=namespace.location)
54
55 # When resource group name in namespace is different from what in cache, reset it and create new server name
56 if namespace.resource_group_name != get_config_value(db_type, 'group', None):
57 set_config_value(db_type, 'group', namespace.resource_group_name)
58 if namespace.server_name is None:
59 namespace.server_name = create_random_resource_name('server')
60 set_config_value(db_type, 'server', namespace.server_name)
61
62 # When no resource group name in namespace and cache, create new resource group with random name
63 create_resource_group = True
64 if namespace.resource_group_name is None: # No resource group provided and in cache
65 namespace.resource_group_name = create_random_resource_name('group')
66 else:
67 try:
68 resource_client.resource_groups.get(namespace.resource_group_name)
69 create_resource_group = False
70 except CloudError: # throw exception when resource group name is invalid
71 pass
72
73 if create_resource_group:
74 # create new resource group
75 params = ResourceGroup(location=namespace.location)
76 logger.warning('Creating Resource Group \'%s\'...', namespace.resource_group_name)
77 resource_client.resource_groups.create_or_update(namespace.resource_group_name, params)
78 _set_value(db_type, namespace, 'resource_group_name', 'group', default=namespace.resource_group_name)
79
80 _set_value(db_type, namespace, 'server_name', 'server', default=create_random_resource_name('server'))
81 _set_value(db_type, namespace, 'administrator_login', 'login', default=generate_username())
82 if namespace.generate_password:
83 namespace.administrator_login_password = str(uuid.uuid4())
84 del namespace.generate_password
85 _set_value(db_type, namespace, 'database_name', 'database', default=DEFAULT_DATABASE_NAME)
86
87
88 def _process_db_down_namespace(namespace, db_type=None):
89 # populate from cache if existing
90 if namespace.resource_group_name is None:
91 namespace.resource_group_name = _get_value(db_type, namespace, 'resource_group_name', 'group')
92 remove_config_value(db_type, 'group')
93 if namespace.server_name is None and not namespace.delete_group:
94 namespace.server_name = _get_value(db_type, namespace, 'server_name', 'server')
95 remove_config_value(db_type, 'server')
96 remove_config_value(db_type, 'login')
97 remove_config_value(db_type, 'database')
98 remove_config_value(db_type, 'location')
99
100 # put resource group info back in config if user does not want to delete it
101 if not namespace.delete_group and namespace.resource_group_name:
102 _set_value(db_type, namespace, 'resource_group_name', 'group')
103
104 # error handling
105 if namespace.delete_group and not namespace.resource_group_name:
106 raise CLIError("Please specify the resource group name to delete.")
107 if not namespace.delete_group and not namespace.resource_group_name and not namespace.server_name:
108 raise CLIError("Please specify the {} server name to delete and its resource group name if you only want to "
109 "delete the specific {} server.".format(db_type, db_type))
110
111
112 def _set_value(db_type, namespace, attribute, option, default=None, cache=True):
113 if getattr(namespace, attribute) is None:
114 try:
115 if get_config_value(db_type, option):
116 setattr(namespace, attribute, get_config_value(db_type, option))
117 else:
118 setattr(namespace, attribute, default)
119 except (configparser.NoSectionError, configparser.NoOptionError):
120 if default is not None:
121 setattr(namespace, attribute, default)
122 if cache:
123 set_config_value(db_type, option, getattr(namespace, attribute))
124
125
126 def _get_value(db_type, namespace, attribute, option):
127 return getattr(namespace, attribute, None) or get_config_value(db_type, option, None)
128
[end of src/db-up/azext_db_up/_validators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/db-up/azext_db_up/_validators.py b/src/db-up/azext_db_up/_validators.py
--- a/src/db-up/azext_db_up/_validators.py
+++ b/src/db-up/azext_db_up/_validators.py
@@ -48,7 +48,7 @@
if _get_value(db_type, namespace, 'location', 'location') is None:
try:
get_default_location_from_resource_group(cmd, namespace)
- except (CLIError, ValidationError):
+ except Exception:
namespace.location = 'eastus'
_set_value(db_type, namespace, 'location', 'location', default=namespace.location)
@@ -67,7 +67,7 @@
try:
resource_client.resource_groups.get(namespace.resource_group_name)
create_resource_group = False
- except CloudError: # throw exception when resource group name is invalid
+ except Exception: # throw exception when resource group name is invalid
pass
if create_resource_group:
diff --git a/src/db-up/setup.py b/src/db-up/setup.py
--- a/src/db-up/setup.py
+++ b/src/db-up/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.2.3"
+VERSION = "0.2.4"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
|
{"golden_diff": "diff --git a/src/db-up/azext_db_up/_validators.py b/src/db-up/azext_db_up/_validators.py\n--- a/src/db-up/azext_db_up/_validators.py\n+++ b/src/db-up/azext_db_up/_validators.py\n@@ -48,7 +48,7 @@\n if _get_value(db_type, namespace, 'location', 'location') is None:\n try:\n get_default_location_from_resource_group(cmd, namespace)\n- except (CLIError, ValidationError):\n+ except Exception:\n namespace.location = 'eastus'\n _set_value(db_type, namespace, 'location', 'location', default=namespace.location)\n \n@@ -67,7 +67,7 @@\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n create_resource_group = False\n- except CloudError: # throw exception when resource group name is invalid\n+ except Exception: # throw exception when resource group name is invalid\n pass\n \n if create_resource_group:\ndiff --git a/src/db-up/setup.py b/src/db-up/setup.py\n--- a/src/db-up/setup.py\n+++ b/src/db-up/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.2.3\"\n+VERSION = \"0.2.4\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "az postgres up does not create the resource group\n## Describe the bug\r\n**Tutorial link:** [Deploy a Django web app with PostgreSQL in Azure App Service](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone)\r\n\r\nIt is expected that the `az postgres up` command creates the resource group if it does not already exist, as mentioned in the [doc](https://docs.microsoft.com/en-us/azure/app-service/tutorial-python-postgresql-app?tabs=cmd%2Cclone#3-create-postgres-database-in-azure).\r\n\r\nRunning this command on Azure CLI version **2.22.1** and `db-up` extension version **0.2.3** works as expected.\r\n\r\n\r\nHowever, this does not work with the latest Azure CLI version **2.24.2** and `db-up` extension version **0.2.3**.\r\n\r\n**Command Name**\r\n`az postgres up\r\nExtension Name: db-up. Version: 0.2.3.`\r\n\r\n**Errors:**\r\n```\r\n(ResourceGroupNotFound) Resource group 'DjangoPostgres-tutorial-rg-2242' could not be found.\r\n```\r\n\r\n## To Reproduce:\r\nSteps to reproduce the behavior. Note that argument values have been redacted, as they may contain sensitive information.\r\n\r\n- Install Azure CLI version `2.24.2`.\r\n- `az postgres up --resource-group {} --location {} --sku-name {} --server-name {} --database-name {} --admin-user {} --admin-password {} --ssl-enforcement {}`\r\n\r\n## Expected Behavior\r\nThe command should create the specified resource group and proceed with provisioning the postgres db server, as shown in the screenshot above.\r\n\r\n## Environment Summary\r\n```\r\nWindows-10-10.0.19041-SP0\r\nPython 3.8.9\r\nInstaller: MSI\r\n\r\nazure-cli 2.24.2\r\nazure-cli-ml 1.24.0\r\n\r\nExtensions:\r\nazure-cli-ml 1.24.0\r\nazure-devops 0.18.0\r\nazure-iot 0.10.9\r\ndb-up 0.2.3\r\nmesh 0.10.6\r\nresource-graph 1.1.0\r\nscheduled-query 0.2.0\r\nspring-cloud 1.1.1\r\n\r\n```\r\n## Additional Context\r\nGitHub Issue: https://github.com/MicrosoftDocs/azure-docs/issues/76722\r\n\r\n<!--Please don't remove this:-->\r\n<!--auto-generated-->\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.2.3\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = [\n 'Cython==0.29.17',\n 'mysql-connector-python==8.0.14',\n 'psycopg2-binary==2.8.5'\n]\n\nsetup(\n name='db-up',\n version=VERSION,\n description='Additional commands to simplify Azure Database workflows.',\n long_description='An Azure CLI Extension to provide additional DB commands.',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/db-up',\n classifiers=CLASSIFIERS,\n package_data={'azext_db_up': ['azext_metadata.json', 'random_name/*']},\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/db-up/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=import-error\nimport uuid\nfrom six.moves import configparser\nfrom azure.cli.core.commands.validators import get_default_location_from_resource_group\nfrom azure.mgmt.resource.resources.models import ResourceGroup\nfrom knack.log import get_logger\nfrom knack.util import CLIError\nfrom msrestazure.azure_exceptions import CloudError\nfrom msrest.exceptions import ValidationError\nfrom azext_db_up._client_factory import resource_client_factory\nfrom azext_db_up.random_name.generate import generate_username\nfrom azext_db_up.util import create_random_resource_name, get_config_value, set_config_value, remove_config_value\n\nlogger = get_logger(__name__)\n\nDEFAULT_LOCATION = 'westus2'\nDEFAULT_DATABASE_NAME = 'sampledb'\n\n\ndef db_up_namespace_processor(db_type):\n return lambda cmd, namespace: _process_db_up_namespace(cmd, namespace, db_type=db_type)\n\n\ndef db_down_namespace_processor(db_type):\n return lambda cmd, namespace: _process_db_down_namespace(namespace, db_type=db_type)\n\n\n# pylint: disable=bare-except\ndef _process_db_up_namespace(cmd, namespace, db_type=None):\n # populate from cache if existing when no resource group name provided\n resource_client = resource_client_factory(cmd.cli_ctx)\n if namespace.resource_group_name is None:\n _set_value(db_type, namespace, 'resource_group_name', 'group', cache=False)\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n except: # Clear resource group name information when it is invalid\n namespace.resource_group_name = None\n\n # populate from cache if existing when no location provided\n if namespace.location is None:\n _set_value(db_type, namespace, 'location', 'location', cache=False)\n # generate smart defaults when namespace.location is None\n if _get_value(db_type, namespace, 'location', 'location') is None:\n try:\n 
get_default_location_from_resource_group(cmd, namespace)\n except (CLIError, ValidationError):\n namespace.location = 'eastus'\n _set_value(db_type, namespace, 'location', 'location', default=namespace.location)\n\n # When resource group name in namespace is different from what in cache, reset it and create new server name\n if namespace.resource_group_name != get_config_value(db_type, 'group', None):\n set_config_value(db_type, 'group', namespace.resource_group_name)\n if namespace.server_name is None:\n namespace.server_name = create_random_resource_name('server')\n set_config_value(db_type, 'server', namespace.server_name)\n\n # When no resource group name in namespace and cache, create new resource group with random name\n create_resource_group = True\n if namespace.resource_group_name is None: # No resource group provided and in cache\n namespace.resource_group_name = create_random_resource_name('group')\n else:\n try:\n resource_client.resource_groups.get(namespace.resource_group_name)\n create_resource_group = False\n except CloudError: # throw exception when resource group name is invalid\n pass\n\n if create_resource_group:\n # create new resource group\n params = ResourceGroup(location=namespace.location)\n logger.warning('Creating Resource Group \\'%s\\'...', namespace.resource_group_name)\n resource_client.resource_groups.create_or_update(namespace.resource_group_name, params)\n _set_value(db_type, namespace, 'resource_group_name', 'group', default=namespace.resource_group_name)\n\n _set_value(db_type, namespace, 'server_name', 'server', default=create_random_resource_name('server'))\n _set_value(db_type, namespace, 'administrator_login', 'login', default=generate_username())\n if namespace.generate_password:\n namespace.administrator_login_password = str(uuid.uuid4())\n del namespace.generate_password\n _set_value(db_type, namespace, 'database_name', 'database', default=DEFAULT_DATABASE_NAME)\n\n\ndef _process_db_down_namespace(namespace, db_type=None):\n # populate from cache if existing\n if namespace.resource_group_name is None:\n namespace.resource_group_name = _get_value(db_type, namespace, 'resource_group_name', 'group')\n remove_config_value(db_type, 'group')\n if namespace.server_name is None and not namespace.delete_group:\n namespace.server_name = _get_value(db_type, namespace, 'server_name', 'server')\n remove_config_value(db_type, 'server')\n remove_config_value(db_type, 'login')\n remove_config_value(db_type, 'database')\n remove_config_value(db_type, 'location')\n\n # put resource group info back in config if user does not want to delete it\n if not namespace.delete_group and namespace.resource_group_name:\n _set_value(db_type, namespace, 'resource_group_name', 'group')\n\n # error handling\n if namespace.delete_group and not namespace.resource_group_name:\n raise CLIError(\"Please specify the resource group name to delete.\")\n if not namespace.delete_group and not namespace.resource_group_name and not namespace.server_name:\n raise CLIError(\"Please specify the {} server name to delete and its resource group name if you only want to \"\n \"delete the specific {} server.\".format(db_type, db_type))\n\n\ndef _set_value(db_type, namespace, attribute, option, default=None, cache=True):\n if getattr(namespace, attribute) is None:\n try:\n if get_config_value(db_type, option):\n setattr(namespace, attribute, get_config_value(db_type, option))\n else:\n setattr(namespace, attribute, default)\n except (configparser.NoSectionError, configparser.NoOptionError):\n if 
default is not None:\n setattr(namespace, attribute, default)\n if cache:\n set_config_value(db_type, option, getattr(namespace, attribute))\n\n\ndef _get_value(db_type, namespace, attribute, option):\n return getattr(namespace, attribute, None) or get_config_value(db_type, option, None)\n", "path": "src/db-up/azext_db_up/_validators.py"}]}
| 3,127 | 310 |
gh_patches_debug_67107 | rasdani/github-patches | git_diff | beeware__toga-1078 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
toga-winforms\windows.py openFileDialog needs an "s"
**Describe the bug**
When using `multiselect = True` in winforms, the returned value is a single file because
    if dialog.ShowDialog() == WinForms.DialogResult.OK:
        return dialog.FileName

should be

    if dialog.ShowDialog() == WinForms.DialogResult.OK:
        return dialog.FileName**s**
**To Reproduce**
    fname = self.main_window.open_file_dialog(
        title="Open the file",
        multiselect=True
    )
**Expected behavior**
Returns a list of files (`fname`), which is what happens on a Mac. On Windows it is the name of the first file as a single string. With the suggested fix it returns the selection, but it still needs to be coerced into a list, as sketched below.
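For illustration, this is roughly how the corrected `open_file_dialog` could look (a sketch, not the exact patch; the `list(...)` coercion is my addition so Windows matches the list-of-paths behaviour on macOS):

```python
def open_file_dialog(self, title, initial_directory, file_types, multiselect):
    dialog = WinForms.OpenFileDialog()
    dialog.Title = title
    if initial_directory is not None:
        dialog.InitialDirectory = initial_directory
    if file_types is not None:
        dialog.Filter = self.build_filter(file_types)
    if multiselect:
        dialog.Multiselect = True
    if dialog.ShowDialog() == WinForms.DialogResult.OK:
        # FileNames (plural) holds every selected path; FileName is only the first one
        return list(dialog.FileNames) if multiselect else dialog.FileName
    raise ValueError("No filename provided in the open file dialog")
```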
**Environment:**
- Operating System: Mac OS 10.15.6 & Windows 10
- Python version: 3.8
- Software versions:
- Briefcase: 0.3.3
- Toga: 0.3.0 dev23
</issue>
<code>
[start of src/winforms/toga_winforms/window.py]
1 from toga import GROUP_BREAK, SECTION_BREAK
2
3 from .libs import Size, WinForms
4
5
6 class WinFormsViewport:
7 def __init__(self, native, frame):
8 self.native = native
9 self.frame = frame
10 self.baseline_dpi = 96
11
12 @property
13 def width(self):
14 # Treat `native=None` as a 0x0 viewport
15 if self.native is None:
16 return 0
17 return self.native.ClientSize.Width
18
19 @property
20 def height(self):
21 if self.native is None:
22 return 0
23 # Subtract any vertical shift of the frame. This is to allow
24 # for toolbars, or any other viewport-level decoration.
25 return self.native.ClientSize.Height - self.frame.vertical_shift
26
27 @property
28 def dpi(self):
29 if self.native is None:
30 return self.baseline_dpi
31 return self.native.CreateGraphics().DpiX
32
33
34 class Window:
35 def __init__(self, interface):
36 self.interface = interface
37 self.interface._impl = self
38 self.create()
39
40 def create(self):
41 self.native = WinForms.Form(self)
42 self.native.ClientSize = Size(*self.interface._size)
43 self.native.interface = self.interface
44 self.native.Resize += self.winforms_resize
45 self.toolbar_native = None
46 self.toolbar_items = None
47
48 def create_toolbar(self):
49 self.toolbar_native = WinForms.ToolStrip()
50 for cmd in self.interface.toolbar:
51 if cmd == GROUP_BREAK:
52 item = WinForms.ToolStripSeparator()
53 elif cmd == SECTION_BREAK:
54 item = WinForms.ToolStripSeparator()
55 else:
56 if cmd.icon is not None:
57 native_icon = cmd.icon._impl.native
58 item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())
59 else:
60 item = WinForms.ToolStripMenuItem(cmd.label)
61 item.Click += cmd._impl.as_handler()
62 cmd._impl.native.append(item)
63 self.toolbar_native.Items.Add(item)
64
65 def set_position(self, position):
66 pass
67
68 def set_size(self, size):
69 self.native.ClientSize = Size(*self.interface._size)
70
71 def set_app(self, app):
72 if app is None:
73 return
74 icon_impl = app.interface.icon._impl
75 if icon_impl is None:
76 return
77 self.native.Icon = icon_impl.native
78
79 @property
80 def vertical_shift(self):
81 # vertical shift is the toolbar height or 0
82 result = 0
83 try:
84 result += self.native.interface._impl.toolbar_native.Height
85 except AttributeError:
86 pass
87 try:
88 result += self.native.interface._impl.native.MainMenuStrip.Height
89 except AttributeError:
90 pass
91 return result
92
93 def set_content(self, widget):
94 if self.toolbar_native:
95 self.native.Controls.Add(self.toolbar_native)
96 # Create the lookup table of menu items,
97 # then force the creation of the menus.
98 self.native.Controls.Add(widget.native)
99
100 # Set the widget's viewport to be based on the window's content.
101 widget.viewport = WinFormsViewport(native=self.native, frame=self)
102 widget.frame = self
103
104 # Add all children to the content widget.
105 for child in widget.interface.children:
106 child._impl.container = widget
107
108 def set_title(self, title):
109 self.native.Text = title
110
111 def show(self):
112 # The first render of the content will establish the
113 # minimum possible content size; use that to enforce
114 # a minimum window size.
115 TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight
116 # Now that the content is visible, we can do our initial hinting,
117 # and use that as the basis for setting the minimum window size.
118 self.interface.content._impl.rehint()
119 self.interface.content.style.layout(
120 self.interface.content,
121 WinFormsViewport(native=None, frame=None),
122 )
123 self.native.MinimumSize = Size(
124 int(self.interface.content.layout.width),
125 int(self.interface.content.layout.height) + TITLEBAR_HEIGHT
126 )
127 self.interface.content.refresh()
128
129 self.native.Show()
130
131 def winforms_FormClosing(self, event, handler):
132 if self.interface.app.on_exit:
133 self.interface.app.on_exit(self.interface.app)
134
135 def set_full_screen(self, is_full_screen):
136 self.interface.factory.not_implemented('Window.set_full_screen()')
137
138 def on_close(self):
139 pass
140
141 def close(self):
142 self.native.Close()
143
144 def winforms_resize(self, sender, args):
145 if self.interface.content:
146 # Re-layout the content
147 self.interface.content.refresh()
148
149 def info_dialog(self, title, message):
150 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)
151
152 def question_dialog(self, title, message):
153 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)
154 return result
155
156 def confirm_dialog(self, title, message):
157 result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)
158 # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel
159 return True if result == WinForms.DialogResult.OK else False
160
161 def error_dialog(self, title, message):
162 return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,
163 WinForms.MessageBoxIcon.Error)
164
165 def stack_trace_dialog(self, title, message, content, retry=False):
166 pass
167
168 def save_file_dialog(self, title, suggested_filename, file_types):
169 dialog = WinForms.SaveFileDialog()
170 dialog.Title = title
171 if suggested_filename is not None:
172 dialog.FileName = suggested_filename
173 if file_types is not None:
174 dialog.Filter = self.build_filter(file_types)
175 if dialog.ShowDialog() == WinForms.DialogResult.OK:
176 return dialog.FileName
177 else:
178 raise ValueError("No filename provided in the save file dialog")
179
180 def open_file_dialog(self, title, initial_directory, file_types, multiselect):
181 dialog = WinForms.OpenFileDialog()
182 dialog.Title = title
183 if initial_directory is not None:
184 dialog.InitialDirectory = initial_directory
185 if file_types is not None:
186 dialog.Filter = self.build_filter(file_types)
187 if multiselect:
188 dialog.Multiselect = True
189 if dialog.ShowDialog() == WinForms.DialogResult.OK:
190 return dialog.FileName
191 else:
192 raise ValueError("No filename provided in the open file dialog")
193
194 def select_folder_dialog(self, title, initial_directory, multiselect):
195 dialog = WinForms.FolderBrowserDialog()
196 dialog.Title = title
197 if initial_directory is not None:
198 dialog.InitialDirectory = initial_directory
199
200 if dialog.ShowDialog() == WinForms.DialogResult.OK:
201 return [dialog.SelectedPath]
202 else:
203 raise ValueError("No folder provided in the select folder dialog")
204
205 def build_filter(self, file_types):
206 file_string = "{0} files (*.{0})|*.{0}"
207 return '|'.join([file_string.format(ext) for ext in file_types]) + \
208 "|All files (*.*)|*.*"
209
[end of src/winforms/toga_winforms/window.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py
--- a/src/winforms/toga_winforms/window.py
+++ b/src/winforms/toga_winforms/window.py
@@ -187,7 +187,7 @@
if multiselect:
dialog.Multiselect = True
if dialog.ShowDialog() == WinForms.DialogResult.OK:
- return dialog.FileName
+ return dialog.FileNames if multiselect else dialog.FileName
else:
raise ValueError("No filename provided in the open file dialog")
|
{"golden_diff": "diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py\n--- a/src/winforms/toga_winforms/window.py\n+++ b/src/winforms/toga_winforms/window.py\n@@ -187,7 +187,7 @@\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n- return dialog.FileName\n+ return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n", "issue": "toga-winforms\\windows.py openFileDialog needs an \"s\"\n**Describe the bug**\r\nWhen using multiselect = True in winforms the returned value is a single file because \r\n\r\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\r\n return dialog.FileName\r\n\r\nshould be \r\n\r\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\r\n return dialog.FileName**s**\r\n\r\n \r\n\r\n**To Reproduce**\r\n\r\n fname = self.main_window.open_file_dialog(\r\n title=\"Open the file\",\r\n multiselect=True\r\n )\r\n\r\n\r\n**Expected behavior**\r\nreturns a list of files (fname) which is true on a Mac. On Windows it is the name of the first file as a single string . With suggested fix return selection but it still needs to be coerced into a list.\r\n\r\n\r\n**Environment:**\r\n - Operating System: Mac OS 10.15.6 & Windows 10\r\n - Python version: 3.8\r\n - Software versions:\r\n - Briefcase: 0.3.3\r\n - Toga: 0.3.0 dev23\r\n\n", "before_files": [{"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. 
This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n 
self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n", "path": "src/winforms/toga_winforms/window.py"}]}
| 2,785 | 120 |
gh_patches_debug_8679 | rasdani/github-patches | git_diff | ydataai__ydata-profiling-728 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ExpectationProfiler fails to profile a datetime column
**Describe the bug**
I've encountered the following `TypeError` when trying to profile a dataframe that has a datetime column
<details><summary>Click to expand <strong><em>Traceback</em></strong></summary>
```python
profile.to_expectation_suite(suite_name=f"{schema}.{table}")
[03/11/21 14:49:33] INFO INFO - 2021-03-11 14:49:33,088 - utils.py - NumExpr utils.py:157
defaulting to 4 threads.
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/ds-vroom-demand/great_expectations/uncommitted/test_profile.py in
----> 15 profile.to_expectation_suite(suite_name=f"{schema}.{table}")
~/.pyenv/versions/vroom/lib/python3.7/site-packages/pandas_profiling/expectations_report.py in to_expectation_suite(self, suite_name, data_context, save_suite, run_validation, build_data_docs, handler)
88 # Dispatch to expectations per semantic variable type
89 for name, variable_summary in summary["variables"].items():
---> 90 handler.handle(variable_summary["type"], name, variable_summary, batch)
91
92 # We don't actually update the suite object on the batch in place, so need
...
~/.pyenv/versions/vroom/lib/python3.7/site-packages/great_expectations/dataset/pandas_dataset.py in is_between(val)
1223 ):
1224 raise TypeError(
-> 1225 "Column values, min_value, and max_value must either be None or of the same type."
1226 )
1227
```
</details>
```python
TypeError: Column values, min_value, and max_value must either be None or of the same type.
```
From looking at the arguments to `is_between` in `great_expectations.dataset.pandas_dataset.py`, this seems to be due to `min_value` and `max_value` being serialised to their string representations, while the column values themselves are not.
I think this can be resolved by modifying the arguments to `datetime_expectations` in `pandas_profiling.model.expectation_algorithms` as follows
```python
def datetime_expectations(name, summary, batch, *args):
    if any(k in summary for k in ["min", "max"]):
        batch.expect_column_values_to_be_between(
            name, min_value=summary.get("min"), max_value=summary.get("max"), parse_strings_as_datetimes=True
        )

    return name, summary, batch
```
I've been able to resolve this issue myself by altering that function. I'm happy to submit this as a PR if this is acceptable.
**To Reproduce**
```python
"""
Test for issue XXX:
https://github.com/pandas-profiling/pandas-profiling/issues/XXX
"""
import pandas as pd
import pandas_profiling

def test_issueXXX():
    df = pd.DataFrame({'date': pd.date_range('2021-01-01', '2021-03-01', freq='d')})
    profile = pandas_profiling.ProfileReport(df=df, explorative=True)
    profile.to_expectation_suite(suite_name='test', save_suite=False, build_data_docs=False, run_validation=False)
```
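Continuing from the snippet above, a quick way to confirm the fix takes effect is to inspect the generated suite (a sketch, assuming the Great Expectations 0.13.x API where an `ExpectationSuite` exposes an `expectations` list of configurations with `expectation_type` and `kwargs`):

```python
suite = profile.to_expectation_suite(
    suite_name='test', save_suite=False, build_data_docs=False, run_validation=False
)
between = [
    e for e in suite.expectations
    if e.expectation_type == 'expect_column_values_to_be_between'
]
# the between-expectation on the datetime column should now carry the parsing flag
assert all(e.kwargs.get('parse_strings_as_datetimes') for e in between)
```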
**Version information:**
* _Python version_: 3.7.6
* _Environment_: Pycharm
* _`pip`_:
<details><summary>Click to expand <strong><em>Version information</em></strong></summary>
<p>
```
alembic==1.4.1
altair==4.1.0
appnope==0.1.2
argon2-cffi==20.1.0
arrow==0.17.0
astor==0.8.1
async-generator==1.10
attrs==20.3.0
azure-core==1.11.0
azure-storage-blob==12.7.1
backcall==0.2.0
base58==2.1.0
binaryornot==0.4.4
bleach==3.2.3
blinker==1.4
boto3==1.17.2
botocore==1.20.2
cachetools==4.2.1
category-encoders==2.2.2
certifi==2020.12.5
cffi==1.14.4
chardet==3.0.4
click==7.1.2
cloudpickle==1.6.0
colorama==0.4.4
commonmark==0.9.1
confuse==1.4.0
cookiecutter==1.7.2
cryptography==3.3.1
cycler==0.10.0
dask==2021.2.0
databricks-cli==0.14.1
decorator==4.4.2
defusedxml==0.6.0
distributed==2021.2.0
docker==4.4.2
docutils==0.16
entrypoints==0.3
Faker==6.0.0
fastapi==0.63.0
featuretools==0.23.1
Flask==1.1.2
fsspec==0.8.5
gitdb==4.0.5
GitPython==3.1.12
great-expectations==0.13.8
gunicorn==20.0.4
HeapDict==1.0.1
htmlmin==0.1.12
hypothesis==6.3.4
idna==2.10
ImageHash==4.2.0
importlib-metadata==2.1.1
iniconfig==1.1.1
ipykernel==5.4.3
ipython==7.19.0
ipython-genutils==0.2.0
ipywidgets==7.6.3
isodate==0.6.0
itsdangerous==1.1.0
jedi==0.18.0
Jinja2==2.11.2
jinja2-time==0.2.0
jinjasql==0.1.8
jmespath==0.10.0
joblib==1.0.0
jsonpatch==1.28
jsonpointer==2.0
jsonschema==3.2.0
jupyter==1.0.0
jupyter-client==6.1.11
jupyter-console==6.2.0
jupyter-core==4.7.0
jupyterlab-pygments==0.1.2
jupyterlab-widgets==1.0.0
kiwisolver==1.3.1
lightgbm==3.1.1
locket==0.2.1
loky==2.9.0
Mako==1.1.4
MarkupSafe==1.1.1
matplotlib==3.3.3
missingno==0.4.2
mistune==0.8.4
mlflow==1.13.1
mock==4.0.3
msgpack==1.0.2
msrest==0.6.21
mypy-extensions==0.4.3
nbclient==0.5.1
nbconvert==6.0.7
nbformat==5.1.2
nest-asyncio==1.5.1
networkx==2.5
nltk==3.5
notebook==6.2.0
numpy==1.19.5
oauthlib==3.1.0
opt-einsum==3.3.0
packaging==20.8
pandas==1.2.1
pandas-profiling==2.11.0
pandera==0.6.2
pandocfilters==1.4.3
parso==0.8.1
partd==1.1.0
patsy==0.5.1
pexpect==4.8.0
phik==0.11.2
pickleshare==0.7.5
Pillow==8.1.0
pluggy==0.13.1
poyo==0.5.0
prometheus-client==0.9.0
prometheus-flask-exporter==0.18.1
prompt-toolkit==3.0.14
protobuf==3.14.0
psutil==5.8.0
psycopg2-binary==2.8.6
ptyprocess==0.7.0
py==1.10.0
pycparser==2.20
pydantic==1.7.3
pydeck==0.5.0
Pygments==2.7.4
pyOpenSSL==20.0.1
pyparsing==2.4.7
PyPika==0.47.4
pyro-api==0.1.2
pyro-ppl==1.5.2
pyrsistent==0.17.3
pytest==6.2.2
python-dateutil==2.8.1
python-dotenv==0.15.0
python-editor==1.0.4
python-slugify==4.0.1
pytz==2020.5
PyWavelets==1.1.1
PyYAML==5.4.1
pyzmq==21.0.2
qtconsole==5.0.2
QtPy==1.9.0
querystring-parser==1.2.4
regex==2020.11.13
requests==2.25.1
requests-oauthlib==1.3.0
rich==9.11.0
rpy2==3.4.2
ruamel.yaml==0.16.12
s3transfer==0.3.4
scikit-learn==0.24.1
scipy==1.6.0
seaborn==0.11.1
Send2Trash==1.5.0
shellingham==1.4.0
six==1.15.0
smart-open==4.2.0
smmap==3.0.5
sortedcontainers==2.3.0
SQLAlchemy==1.3.23
sqlparse==0.4.1
starlette==0.13.6
statsmodels==0.12.2
streamlit==0.77.0
tabulate==0.8.7
tangled-up-in-unicode==0.0.6
tblib==1.7.0
termcolor==1.1.0
terminado==0.9.2
testpath==0.4.4
text-unidecode==1.3
threadpoolctl==2.1.0
toml==0.10.2
toolz==0.11.1
torch==1.7.1
tornado==6.1
tqdm==4.56.0
traitlets==5.0.5
typer==0.3.2
typer-cli==0.0.11
typing-extensions==3.7.4.3
typing-inspect==0.6.0
tzlocal==2.1
urllib3==1.25.11
validators==0.18.2
visions==0.6.0
wcwidth==0.2.5
webencodings==0.5.1
websocket-client==0.57.0
Werkzeug==1.0.1
widgetsnbextension==3.5.1
wrapt==1.12.1
zict==2.0.0
zipp==3.4.0
```
</p></summary></details>
</issue>
<code>
[start of src/pandas_profiling/model/expectation_algorithms.py]
1 def generic_expectations(name, summary, batch, *args):
2 batch.expect_column_to_exist(name)
3
4 if summary["n_missing"] == 0:
5 batch.expect_column_values_to_not_be_null(name)
6
7 if summary["p_unique"] == 1.0:
8 batch.expect_column_values_to_be_unique(name)
9
10 return name, summary, batch
11
12
13 def numeric_expectations(name, summary, batch, *args):
14 from great_expectations.profile.base import ProfilerTypeMapping
15
16 numeric_type_names = (
17 ProfilerTypeMapping.INT_TYPE_NAMES + ProfilerTypeMapping.FLOAT_TYPE_NAMES
18 )
19
20 batch.expect_column_values_to_be_in_type_list(
21 name,
22 numeric_type_names,
23 meta={
24 "notes": {
25 "format": "markdown",
26 "content": [
27 "The column values should be stored in one of these types."
28 ],
29 }
30 },
31 )
32
33 if summary["monotonic_increase"]:
34 batch.expect_column_values_to_be_increasing(
35 name, strictly=summary["monotonic_increase_strict"]
36 )
37
38 if summary["monotonic_decrease"]:
39 batch.expect_column_values_to_be_decreasing(
40 name, strictly=summary["monotonic_decrease_strict"]
41 )
42
43 if any(k in summary for k in ["min", "max"]):
44 batch.expect_column_values_to_be_between(
45 name, min_value=summary.get("min"), max_value=summary.get("max")
46 )
47
48 return name, summary, batch
49
50
51 def categorical_expectations(name, summary, batch, *args):
52 # Use for both categorical and special case (boolean)
53 absolute_threshold = 10
54 relative_threshold = 0.2
55 if (
56 summary["n_distinct"] < absolute_threshold
57 or summary["p_distinct"] < relative_threshold
58 ):
59 batch.expect_column_values_to_be_in_set(
60 name, set(summary["value_counts_without_nan"].keys())
61 )
62 return name, summary, batch
63
64
65 def path_expectations(name, summary, batch, *args):
66 return name, summary, batch
67
68
69 def datetime_expectations(name, summary, batch, *args):
70 if any(k in summary for k in ["min", "max"]):
71 batch.expect_column_values_to_be_between(
72 name, min_value=summary.get("min"), max_value=summary.get("max")
73 )
74
75 return name, summary, batch
76
77
78 def image_expectations(name, summary, batch, *args):
79 return name, summary, batch
80
81
82 def url_expectations(name, summary, batch, *args):
83 return name, summary, batch
84
85
86 def file_expectations(name, summary, batch, *args):
87 # By definition within our type logic, a file exists (as it's a path that also exists)
88 batch.expect_file_to_exist(name)
89
90 return name, summary, batch
91
[end of src/pandas_profiling/model/expectation_algorithms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pandas_profiling/model/expectation_algorithms.py b/src/pandas_profiling/model/expectation_algorithms.py
--- a/src/pandas_profiling/model/expectation_algorithms.py
+++ b/src/pandas_profiling/model/expectation_algorithms.py
@@ -69,7 +69,10 @@
def datetime_expectations(name, summary, batch, *args):
if any(k in summary for k in ["min", "max"]):
batch.expect_column_values_to_be_between(
- name, min_value=summary.get("min"), max_value=summary.get("max")
+ name,
+ min_value=summary.get("min"),
+ max_value=summary.get("max"),
+ parse_strings_as_datetimes=True,
)
return name, summary, batch
|
{"golden_diff": "diff --git a/src/pandas_profiling/model/expectation_algorithms.py b/src/pandas_profiling/model/expectation_algorithms.py\n--- a/src/pandas_profiling/model/expectation_algorithms.py\n+++ b/src/pandas_profiling/model/expectation_algorithms.py\n@@ -69,7 +69,10 @@\n def datetime_expectations(name, summary, batch, *args):\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n- name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\")\r\n+ name,\r\n+ min_value=summary.get(\"min\"),\r\n+ max_value=summary.get(\"max\"),\r\n+ parse_strings_as_datetimes=True,\r\n )\r\n \r\n return name, summary, batch\n", "issue": "ExpectationProfiler fails to profile a datetime column\n**Describe the bug**\r\nI've encountered the following `TypeError` when trying to profile a dataframe that has a datetime column\r\n<details><summary>Click to expand <strong><em>Traceback</em></strong></summary>\r\n\r\n```python\r\n\r\nprofile.to_expectation_suite(suite_name=f\"{schema}.{table}\")\r\n[03/11/21 14:49:33] INFO INFO - 2021-03-11 14:49:33,088 - utils.py - NumExpr utils.py:157\r\n defaulting to 4 threads. \r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n~/ds-vroom-demand/great_expectations/uncommitted/test_profile.py in \r\n----> 15 profile.to_expectation_suite(suite_name=f\"{schema}.{table}\")\r\n\r\n~/.pyenv/versions/vroom/lib/python3.7/site-packages/pandas_profiling/expectations_report.py in to_expectation_suite(self, suite_name, data_context, save_suite, run_validation, build_data_docs, handler)\r\n 88 # Dispatch to expectations per semantic variable type\r\n 89 for name, variable_summary in summary[\"variables\"].items():\r\n---> 90 handler.handle(variable_summary[\"type\"], name, variable_summary, batch)\r\n 91 \r\n 92 # We don't actually update the suite object on the batch in place, so need\r\n\r\n...\r\n~/.pyenv/versions/vroom/lib/python3.7/site-packages/great_expectations/dataset/pandas_dataset.py in is_between(val)\r\n 1223 ):\r\n 1224 raise TypeError(\r\n-> 1225 \"Column values, min_value, and max_value must either be None or of the same type.\"\r\n 1226 )\r\n 1227 \r\n\r\n```\r\n</details>\r\n\r\n```python\r\nTypeError: Column values, min_value, and max_value must either be None or of the same type.\r\n```\r\n\r\nFrom looking at the arguments to `is_between` in `great_expectations.dataset.pandas_dataset.py`, this seems to be due to `min_value` and `max_value` being serialised to their string representations, but the column itself doesn't.\r\n\r\nI think this can be resolved by modifying the arguments to `datetime_expectations` in `pandas_profiling.model.expectation_algorithms` as follows \r\n\r\n```python\r\ndef datetime_expectations(name, summary, batch, *args):\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\"), parse_strings_as_datetimes=True\r\n )\r\n\r\n return name, summary, batch\r\n```\r\nI've been able to resolve this issue myself by altering that function. 
I'm happy to submit this as a PR if this is acceptable.\r\n\r\n**To Reproduce**\r\n```python\r\n\"\"\"\r\nTest for issue XXX:\r\nhttps://github.com/pandas-profiling/pandas-profiling/issues/XXX\r\n\"\"\"\r\nimport pandas as pd\r\nimport pandas_profiling\r\n\r\n\r\ndef test_issueXXX():\r\n df = pd.DataFrame({'date':pd.date_range('2021-01-01', '2021-03-01', freq='d')})\r\n profile = pandas_profiling.ProfileReport(df=df, explorative=True)\r\n profile.to_expectation_suite(suite_name='test', save_suite=False, build_data_docs=False, run_validation=False)\r\n \r\n```\r\n\r\n**Version information:**\r\n\r\n* _Python version_: 3.7.6\r\n* _Environment_: Pycharm\r\n* _`pip`_: \r\n\r\n<details><summary>Click to expand <strong><em>Version information</em></strong></summary>\r\n<p>\r\n\r\n```\r\nalembic==1.4.1\r\naltair==4.1.0\r\nappnope==0.1.2\r\nargon2-cffi==20.1.0\r\narrow==0.17.0\r\nastor==0.8.1\r\nasync-generator==1.10\r\nattrs==20.3.0\r\nazure-core==1.11.0\r\nazure-storage-blob==12.7.1\r\nbackcall==0.2.0\r\nbase58==2.1.0\r\nbinaryornot==0.4.4\r\nbleach==3.2.3\r\nblinker==1.4\r\nboto3==1.17.2\r\nbotocore==1.20.2\r\ncachetools==4.2.1\r\ncategory-encoders==2.2.2\r\ncertifi==2020.12.5\r\ncffi==1.14.4\r\nchardet==3.0.4\r\nclick==7.1.2\r\ncloudpickle==1.6.0\r\ncolorama==0.4.4\r\ncommonmark==0.9.1\r\nconfuse==1.4.0\r\ncookiecutter==1.7.2\r\ncryptography==3.3.1\r\ncycler==0.10.0\r\ndask==2021.2.0\r\ndatabricks-cli==0.14.1\r\ndecorator==4.4.2\r\ndefusedxml==0.6.0\r\ndistributed==2021.2.0\r\ndocker==4.4.2\r\ndocutils==0.16\r\nentrypoints==0.3\r\nFaker==6.0.0\r\nfastapi==0.63.0\r\nfeaturetools==0.23.1\r\nFlask==1.1.2\r\nfsspec==0.8.5\r\ngitdb==4.0.5\r\nGitPython==3.1.12\r\ngreat-expectations==0.13.8\r\ngunicorn==20.0.4\r\nHeapDict==1.0.1\r\nhtmlmin==0.1.12\r\nhypothesis==6.3.4\r\nidna==2.10\r\nImageHash==4.2.0\r\nimportlib-metadata==2.1.1\r\niniconfig==1.1.1\r\nipykernel==5.4.3\r\nipython==7.19.0\r\nipython-genutils==0.2.0\r\nipywidgets==7.6.3\r\nisodate==0.6.0\r\nitsdangerous==1.1.0\r\njedi==0.18.0\r\nJinja2==2.11.2\r\njinja2-time==0.2.0\r\njinjasql==0.1.8\r\njmespath==0.10.0\r\njoblib==1.0.0\r\njsonpatch==1.28\r\njsonpointer==2.0\r\njsonschema==3.2.0\r\njupyter==1.0.0\r\njupyter-client==6.1.11\r\njupyter-console==6.2.0\r\njupyter-core==4.7.0\r\njupyterlab-pygments==0.1.2\r\njupyterlab-widgets==1.0.0\r\nkiwisolver==1.3.1\r\nlightgbm==3.1.1\r\nlocket==0.2.1\r\nloky==2.9.0\r\nMako==1.1.4\r\nMarkupSafe==1.1.1\r\nmatplotlib==3.3.3\r\nmissingno==0.4.2\r\nmistune==0.8.4\r\nmlflow==1.13.1\r\nmock==4.0.3\r\nmsgpack==1.0.2\r\nmsrest==0.6.21\r\nmypy-extensions==0.4.3\r\nnbclient==0.5.1\r\nnbconvert==6.0.7\r\nnbformat==5.1.2\r\nnest-asyncio==1.5.1\r\nnetworkx==2.5\r\nnltk==3.5\r\nnotebook==6.2.0\r\nnumpy==1.19.5\r\noauthlib==3.1.0\r\nopt-einsum==3.3.0\r\npackaging==20.8\r\npandas==1.2.1\r\npandas-profiling==2.11.0\r\npandera==0.6.2\r\npandocfilters==1.4.3\r\nparso==0.8.1\r\npartd==1.1.0\r\npatsy==0.5.1\r\npexpect==4.8.0\r\nphik==0.11.2\r\npickleshare==0.7.5\r\nPillow==8.1.0\r\npluggy==0.13.1\r\npoyo==0.5.0\r\nprometheus-client==0.9.0\r\nprometheus-flask-exporter==0.18.1\r\nprompt-toolkit==3.0.14\r\nprotobuf==3.14.0\r\npsutil==5.8.0\r\npsycopg2-binary==2.8.6\r\nptyprocess==0.7.0\r\npy==1.10.0\r\npycparser==2.20\r\npydantic==1.7.3\r\npydeck==0.5.0\r\nPygments==2.7.4\r\npyOpenSSL==20.0.1\r\npyparsing==2.4.7\r\nPyPika==0.47.4\r\npyro-api==0.1.2\r\npyro-ppl==1.5.2\r\npyrsistent==0.17.3\r\npytest==6.2.2\r\npython-dateutil==2.8.1\r\npython-dotenv==0.15.0\r\npython-editor==1.0.4\r\npython-slugify==4.0.1\r\npytz==2020.5\r\nPyWav
elets==1.1.1\r\nPyYAML==5.4.1\r\npyzmq==21.0.2\r\nqtconsole==5.0.2\r\nQtPy==1.9.0\r\nquerystring-parser==1.2.4\r\nregex==2020.11.13\r\nrequests==2.25.1\r\nrequests-oauthlib==1.3.0\r\nrich==9.11.0\r\nrpy2==3.4.2\r\nruamel.yaml==0.16.12\r\ns3transfer==0.3.4\r\nscikit-learn==0.24.1\r\nscipy==1.6.0\r\nseaborn==0.11.1\r\nSend2Trash==1.5.0\r\nshellingham==1.4.0\r\nsix==1.15.0\r\nsmart-open==4.2.0\r\nsmmap==3.0.5\r\nsortedcontainers==2.3.0\r\nSQLAlchemy==1.3.23\r\nsqlparse==0.4.1\r\nstarlette==0.13.6\r\nstatsmodels==0.12.2\r\nstreamlit==0.77.0\r\ntabulate==0.8.7\r\ntangled-up-in-unicode==0.0.6\r\ntblib==1.7.0\r\ntermcolor==1.1.0\r\nterminado==0.9.2\r\ntestpath==0.4.4\r\ntext-unidecode==1.3\r\nthreadpoolctl==2.1.0\r\ntoml==0.10.2\r\ntoolz==0.11.1\r\ntorch==1.7.1\r\ntornado==6.1\r\ntqdm==4.56.0\r\ntraitlets==5.0.5\r\ntyper==0.3.2\r\ntyper-cli==0.0.11\r\ntyping-extensions==3.7.4.3\r\ntyping-inspect==0.6.0\r\ntzlocal==2.1\r\nurllib3==1.25.11\r\nvalidators==0.18.2\r\nvisions==0.6.0\r\nwcwidth==0.2.5\r\nwebencodings==0.5.1\r\nwebsocket-client==0.57.0\r\nWerkzeug==1.0.1\r\nwidgetsnbextension==3.5.1\r\nwrapt==1.12.1\r\nzict==2.0.0\r\nzipp==3.4.0\r\n```\r\n</p></summary></details>\r\n\r\n\n", "before_files": [{"content": "def generic_expectations(name, summary, batch, *args):\r\n batch.expect_column_to_exist(name)\r\n\r\n if summary[\"n_missing\"] == 0:\r\n batch.expect_column_values_to_not_be_null(name)\r\n\r\n if summary[\"p_unique\"] == 1.0:\r\n batch.expect_column_values_to_be_unique(name)\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef numeric_expectations(name, summary, batch, *args):\r\n from great_expectations.profile.base import ProfilerTypeMapping\r\n\r\n numeric_type_names = (\r\n ProfilerTypeMapping.INT_TYPE_NAMES + ProfilerTypeMapping.FLOAT_TYPE_NAMES\r\n )\r\n\r\n batch.expect_column_values_to_be_in_type_list(\r\n name,\r\n numeric_type_names,\r\n meta={\r\n \"notes\": {\r\n \"format\": \"markdown\",\r\n \"content\": [\r\n \"The column values should be stored in one of these types.\"\r\n ],\r\n }\r\n },\r\n )\r\n\r\n if summary[\"monotonic_increase\"]:\r\n batch.expect_column_values_to_be_increasing(\r\n name, strictly=summary[\"monotonic_increase_strict\"]\r\n )\r\n\r\n if summary[\"monotonic_decrease\"]:\r\n batch.expect_column_values_to_be_decreasing(\r\n name, strictly=summary[\"monotonic_decrease_strict\"]\r\n )\r\n\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\")\r\n )\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef categorical_expectations(name, summary, batch, *args):\r\n # Use for both categorical and special case (boolean)\r\n absolute_threshold = 10\r\n relative_threshold = 0.2\r\n if (\r\n summary[\"n_distinct\"] < absolute_threshold\r\n or summary[\"p_distinct\"] < relative_threshold\r\n ):\r\n batch.expect_column_values_to_be_in_set(\r\n name, set(summary[\"value_counts_without_nan\"].keys())\r\n )\r\n return name, summary, batch\r\n\r\n\r\ndef path_expectations(name, summary, batch, *args):\r\n return name, summary, batch\r\n\r\n\r\ndef datetime_expectations(name, summary, batch, *args):\r\n if any(k in summary for k in [\"min\", \"max\"]):\r\n batch.expect_column_values_to_be_between(\r\n name, min_value=summary.get(\"min\"), max_value=summary.get(\"max\")\r\n )\r\n\r\n return name, summary, batch\r\n\r\n\r\ndef image_expectations(name, summary, batch, *args):\r\n return name, summary, batch\r\n\r\n\r\ndef url_expectations(name, summary, batch, 
*args):\r\n return name, summary, batch\r\n\r\n\r\ndef file_expectations(name, summary, batch, *args):\r\n # By definition within our type logic, a file exists (as it's a path that also exists)\r\n batch.expect_file_to_exist(name)\r\n\r\n return name, summary, batch\r\n", "path": "src/pandas_profiling/model/expectation_algorithms.py"}]}
| 4,063 | 175 |
gh_patches_debug_23173 | rasdani/github-patches | git_diff | scrapy__scrapy-4555 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate Response.body_as_unicode
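
A deprecation like this is usually just a thin wrapper that emits a warning and delegates to the replacement property; a rough sketch of the shape such a change tends to take (the authoritative change is the golden diff at the end of this record, which uses Scrapy's `ScrapyDeprecationWarning`):

```python
import warnings

from scrapy.exceptions import ScrapyDeprecationWarning


def body_as_unicode(self):
    """Return body as unicode (deprecated, use Response.text instead)."""
    warnings.warn(
        'Response.body_as_unicode() is deprecated, please use Response.text instead.',
        ScrapyDeprecationWarning,
    )
    return self.text
```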
</issue>
<code>
[start of scrapy/http/response/text.py]
1 """
2 This module implements the TextResponse class which adds encoding handling and
3 discovering (through HTTP headers) to base Response class.
4
5 See documentation in docs/topics/request-response.rst
6 """
7
8 from contextlib import suppress
9 from typing import Generator
10 from urllib.parse import urljoin
11
12 import parsel
13 from w3lib.encoding import (html_body_declared_encoding, html_to_unicode,
14 http_content_type_encoding, resolve_encoding)
15 from w3lib.html import strip_html5_whitespace
16
17 from scrapy.http import Request
18 from scrapy.http.response import Response
19 from scrapy.utils.python import memoizemethod_noargs, to_unicode
20 from scrapy.utils.response import get_base_url
21
22
23 class TextResponse(Response):
24
25 _DEFAULT_ENCODING = 'ascii'
26
27 def __init__(self, *args, **kwargs):
28 self._encoding = kwargs.pop('encoding', None)
29 self._cached_benc = None
30 self._cached_ubody = None
31 self._cached_selector = None
32 super(TextResponse, self).__init__(*args, **kwargs)
33
34 def _set_url(self, url):
35 if isinstance(url, str):
36 self._url = to_unicode(url, self.encoding)
37 else:
38 super(TextResponse, self)._set_url(url)
39
40 def _set_body(self, body):
41 self._body = b'' # used by encoding detection
42 if isinstance(body, str):
43 if self._encoding is None:
44 raise TypeError('Cannot convert unicode body - %s has no encoding' %
45 type(self).__name__)
46 self._body = body.encode(self._encoding)
47 else:
48 super(TextResponse, self)._set_body(body)
49
50 def replace(self, *args, **kwargs):
51 kwargs.setdefault('encoding', self.encoding)
52 return Response.replace(self, *args, **kwargs)
53
54 @property
55 def encoding(self):
56 return self._declared_encoding() or self._body_inferred_encoding()
57
58 def _declared_encoding(self):
59 return self._encoding or self._headers_encoding() \
60 or self._body_declared_encoding()
61
62 def body_as_unicode(self):
63 """Return body as unicode"""
64 return self.text
65
66 @property
67 def text(self):
68 """ Body as unicode """
69 # access self.encoding before _cached_ubody to make sure
70 # _body_inferred_encoding is called
71 benc = self.encoding
72 if self._cached_ubody is None:
73 charset = 'charset=%s' % benc
74 self._cached_ubody = html_to_unicode(charset, self.body)[1]
75 return self._cached_ubody
76
77 def urljoin(self, url):
78 """Join this Response's url with a possible relative url to form an
79 absolute interpretation of the latter."""
80 return urljoin(get_base_url(self), url)
81
82 @memoizemethod_noargs
83 def _headers_encoding(self):
84 content_type = self.headers.get(b'Content-Type', b'')
85 return http_content_type_encoding(to_unicode(content_type))
86
87 def _body_inferred_encoding(self):
88 if self._cached_benc is None:
89 content_type = to_unicode(self.headers.get(b'Content-Type', b''))
90 benc, ubody = html_to_unicode(content_type, self.body,
91 auto_detect_fun=self._auto_detect_fun,
92 default_encoding=self._DEFAULT_ENCODING)
93 self._cached_benc = benc
94 self._cached_ubody = ubody
95 return self._cached_benc
96
97 def _auto_detect_fun(self, text):
98 for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
99 try:
100 text.decode(enc)
101 except UnicodeError:
102 continue
103 return resolve_encoding(enc)
104
105 @memoizemethod_noargs
106 def _body_declared_encoding(self):
107 return html_body_declared_encoding(self.body)
108
109 @property
110 def selector(self):
111 from scrapy.selector import Selector
112 if self._cached_selector is None:
113 self._cached_selector = Selector(self)
114 return self._cached_selector
115
116 def xpath(self, query, **kwargs):
117 return self.selector.xpath(query, **kwargs)
118
119 def css(self, query):
120 return self.selector.css(query)
121
122 def follow(self, url, callback=None, method='GET', headers=None, body=None,
123 cookies=None, meta=None, encoding=None, priority=0,
124 dont_filter=False, errback=None, cb_kwargs=None, flags=None):
125 # type: (...) -> Request
126 """
127 Return a :class:`~.Request` instance to follow a link ``url``.
128 It accepts the same arguments as ``Request.__init__`` method,
129 but ``url`` can be not only an absolute URL, but also
130
131 * a relative URL
132 * a :class:`~scrapy.link.Link` object, e.g. the result of
133 :ref:`topics-link-extractors`
134 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
135 ``response.css('a.my_link')[0]``
136 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
137 ``response.css('a::attr(href)')[0]`` or
138 ``response.xpath('//img/@src')[0]``
139
140 See :ref:`response-follow-example` for usage examples.
141 """
142 if isinstance(url, parsel.Selector):
143 url = _url_from_selector(url)
144 elif isinstance(url, parsel.SelectorList):
145 raise ValueError("SelectorList is not supported")
146 encoding = self.encoding if encoding is None else encoding
147 return super(TextResponse, self).follow(
148 url=url,
149 callback=callback,
150 method=method,
151 headers=headers,
152 body=body,
153 cookies=cookies,
154 meta=meta,
155 encoding=encoding,
156 priority=priority,
157 dont_filter=dont_filter,
158 errback=errback,
159 cb_kwargs=cb_kwargs,
160 flags=flags,
161 )
162
163 def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,
164 cookies=None, meta=None, encoding=None, priority=0,
165 dont_filter=False, errback=None, cb_kwargs=None, flags=None,
166 css=None, xpath=None):
167 # type: (...) -> Generator[Request, None, None]
168 """
169 A generator that produces :class:`~.Request` instances to follow all
170 links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s
171 ``__init__`` method, except that each ``urls`` element does not need to be
172 an absolute URL, it can be any of the following:
173
174 * a relative URL
175 * a :class:`~scrapy.link.Link` object, e.g. the result of
176 :ref:`topics-link-extractors`
177 * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
178 ``response.css('a.my_link')[0]``
179 * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.
180 ``response.css('a::attr(href)')[0]`` or
181 ``response.xpath('//img/@src')[0]``
182
183 In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction
184 within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).
185
186 Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or
187 using the ``css`` or ``xpath`` parameters, this method will not produce requests for
188 selectors from which links cannot be obtained (for instance, anchor tags without an
189 ``href`` attribute)
190 """
191 arguments = [x for x in (urls, css, xpath) if x is not None]
192 if len(arguments) != 1:
193 raise ValueError(
194 "Please supply exactly one of the following arguments: urls, css, xpath"
195 )
196 if not urls:
197 if css:
198 urls = self.css(css)
199 if xpath:
200 urls = self.xpath(xpath)
201 if isinstance(urls, parsel.SelectorList):
202 selectors = urls
203 urls = []
204 for sel in selectors:
205 with suppress(_InvalidSelector):
206 urls.append(_url_from_selector(sel))
207 return super(TextResponse, self).follow_all(
208 urls=urls,
209 callback=callback,
210 method=method,
211 headers=headers,
212 body=body,
213 cookies=cookies,
214 meta=meta,
215 encoding=encoding,
216 priority=priority,
217 dont_filter=dont_filter,
218 errback=errback,
219 cb_kwargs=cb_kwargs,
220 flags=flags,
221 )
222
223
224 class _InvalidSelector(ValueError):
225 """
226 Raised when a URL cannot be obtained from a Selector
227 """
228
229
230 def _url_from_selector(sel):
231 # type: (parsel.Selector) -> str
232 if isinstance(sel.root, str):
233 # e.g. ::attr(href) result
234 return strip_html5_whitespace(sel.root)
235 if not hasattr(sel.root, 'tag'):
236 raise _InvalidSelector("Unsupported selector: %s" % sel)
237 if sel.root.tag not in ('a', 'link'):
238 raise _InvalidSelector("Only <a> and <link> elements are supported; got <%s>" %
239 sel.root.tag)
240 href = sel.root.get('href')
241 if href is None:
242 raise _InvalidSelector("<%s> element has no href attribute: %s" %
243 (sel.root.tag, sel))
244 return strip_html5_whitespace(href)
245
[end of scrapy/http/response/text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py
--- a/scrapy/http/response/text.py
+++ b/scrapy/http/response/text.py
@@ -5,6 +5,7 @@
See documentation in docs/topics/request-response.rst
"""
+import warnings
from contextlib import suppress
from typing import Generator
from urllib.parse import urljoin
@@ -14,6 +15,7 @@
http_content_type_encoding, resolve_encoding)
from w3lib.html import strip_html5_whitespace
+from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Request
from scrapy.http.response import Response
from scrapy.utils.python import memoizemethod_noargs, to_unicode
@@ -61,6 +63,9 @@
def body_as_unicode(self):
"""Return body as unicode"""
+ warnings.warn('Response.body_as_unicode() is deprecated, '
+ 'please use Response.text instead.',
+ ScrapyDeprecationWarning)
return self.text
@property
|
{"golden_diff": "diff --git a/scrapy/http/response/text.py b/scrapy/http/response/text.py\n--- a/scrapy/http/response/text.py\n+++ b/scrapy/http/response/text.py\n@@ -5,6 +5,7 @@\n See documentation in docs/topics/request-response.rst\n \"\"\"\n \n+import warnings\n from contextlib import suppress\n from typing import Generator\n from urllib.parse import urljoin\n@@ -14,6 +15,7 @@\n http_content_type_encoding, resolve_encoding)\n from w3lib.html import strip_html5_whitespace\n \n+from scrapy.exceptions import ScrapyDeprecationWarning\n from scrapy.http import Request\n from scrapy.http.response import Response\n from scrapy.utils.python import memoizemethod_noargs, to_unicode\n@@ -61,6 +63,9 @@\n \n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n+ warnings.warn('Response.body_as_unicode() is deprecated, '\n+ 'please use Response.text instead.',\n+ ScrapyDeprecationWarning)\n return self.text\n \n @property\n", "issue": "Deprecate Response.body_as_unicode\n\n", "before_files": [{"content": "\"\"\"\nThis module implements the TextResponse class which adds encoding handling and\ndiscovering (through HTTP headers) to base Response class.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\n\nfrom contextlib import suppress\nfrom typing import Generator\nfrom urllib.parse import urljoin\n\nimport parsel\nfrom w3lib.encoding import (html_body_declared_encoding, html_to_unicode,\n http_content_type_encoding, resolve_encoding)\nfrom w3lib.html import strip_html5_whitespace\n\nfrom scrapy.http import Request\nfrom scrapy.http.response import Response\nfrom scrapy.utils.python import memoizemethod_noargs, to_unicode\nfrom scrapy.utils.response import get_base_url\n\n\nclass TextResponse(Response):\n\n _DEFAULT_ENCODING = 'ascii'\n\n def __init__(self, *args, **kwargs):\n self._encoding = kwargs.pop('encoding', None)\n self._cached_benc = None\n self._cached_ubody = None\n self._cached_selector = None\n super(TextResponse, self).__init__(*args, **kwargs)\n\n def _set_url(self, url):\n if isinstance(url, str):\n self._url = to_unicode(url, self.encoding)\n else:\n super(TextResponse, self)._set_url(url)\n\n def _set_body(self, body):\n self._body = b'' # used by encoding detection\n if isinstance(body, str):\n if self._encoding is None:\n raise TypeError('Cannot convert unicode body - %s has no encoding' %\n type(self).__name__)\n self._body = body.encode(self._encoding)\n else:\n super(TextResponse, self)._set_body(body)\n\n def replace(self, *args, **kwargs):\n kwargs.setdefault('encoding', self.encoding)\n return Response.replace(self, *args, **kwargs)\n\n @property\n def encoding(self):\n return self._declared_encoding() or self._body_inferred_encoding()\n\n def _declared_encoding(self):\n return self._encoding or self._headers_encoding() \\\n or self._body_declared_encoding()\n\n def body_as_unicode(self):\n \"\"\"Return body as unicode\"\"\"\n return self.text\n\n @property\n def text(self):\n \"\"\" Body as unicode \"\"\"\n # access self.encoding before _cached_ubody to make sure\n # _body_inferred_encoding is called\n benc = self.encoding\n if self._cached_ubody is None:\n charset = 'charset=%s' % benc\n self._cached_ubody = html_to_unicode(charset, self.body)[1]\n return self._cached_ubody\n\n def urljoin(self, url):\n \"\"\"Join this Response's url with a possible relative url to form an\n absolute interpretation of the latter.\"\"\"\n return urljoin(get_base_url(self), url)\n\n @memoizemethod_noargs\n def _headers_encoding(self):\n content_type = 
self.headers.get(b'Content-Type', b'')\n return http_content_type_encoding(to_unicode(content_type))\n\n def _body_inferred_encoding(self):\n if self._cached_benc is None:\n content_type = to_unicode(self.headers.get(b'Content-Type', b''))\n benc, ubody = html_to_unicode(content_type, self.body,\n auto_detect_fun=self._auto_detect_fun,\n default_encoding=self._DEFAULT_ENCODING)\n self._cached_benc = benc\n self._cached_ubody = ubody\n return self._cached_benc\n\n def _auto_detect_fun(self, text):\n for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):\n try:\n text.decode(enc)\n except UnicodeError:\n continue\n return resolve_encoding(enc)\n\n @memoizemethod_noargs\n def _body_declared_encoding(self):\n return html_body_declared_encoding(self.body)\n\n @property\n def selector(self):\n from scrapy.selector import Selector\n if self._cached_selector is None:\n self._cached_selector = Selector(self)\n return self._cached_selector\n\n def xpath(self, query, **kwargs):\n return self.selector.xpath(query, **kwargs)\n\n def css(self, query):\n return self.selector.css(query)\n\n def follow(self, url, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None):\n # type: (...) -> Request\n \"\"\"\n Return a :class:`~.Request` instance to follow a link ``url``.\n It accepts the same arguments as ``Request.__init__`` method,\n but ``url`` can be not only an absolute URL, but also\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n See :ref:`response-follow-example` for usage examples.\n \"\"\"\n if isinstance(url, parsel.Selector):\n url = _url_from_selector(url)\n elif isinstance(url, parsel.SelectorList):\n raise ValueError(\"SelectorList is not supported\")\n encoding = self.encoding if encoding is None else encoding\n return super(TextResponse, self).follow(\n url=url,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None,\n cookies=None, meta=None, encoding=None, priority=0,\n dont_filter=False, errback=None, cb_kwargs=None, flags=None,\n css=None, xpath=None):\n # type: (...) -> Generator[Request, None, None]\n \"\"\"\n A generator that produces :class:`~.Request` instances to follow all\n links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s\n ``__init__`` method, except that each ``urls`` element does not need to be\n an absolute URL, it can be any of the following:\n\n * a relative URL\n * a :class:`~scrapy.link.Link` object, e.g. 
the result of\n :ref:`topics-link-extractors`\n * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g.\n ``response.css('a.my_link')[0]``\n * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g.\n ``response.css('a::attr(href)')[0]`` or\n ``response.xpath('//img/@src')[0]``\n\n In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction\n within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).\n\n Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or\n using the ``css`` or ``xpath`` parameters, this method will not produce requests for\n selectors from which links cannot be obtained (for instance, anchor tags without an\n ``href`` attribute)\n \"\"\"\n arguments = [x for x in (urls, css, xpath) if x is not None]\n if len(arguments) != 1:\n raise ValueError(\n \"Please supply exactly one of the following arguments: urls, css, xpath\"\n )\n if not urls:\n if css:\n urls = self.css(css)\n if xpath:\n urls = self.xpath(xpath)\n if isinstance(urls, parsel.SelectorList):\n selectors = urls\n urls = []\n for sel in selectors:\n with suppress(_InvalidSelector):\n urls.append(_url_from_selector(sel))\n return super(TextResponse, self).follow_all(\n urls=urls,\n callback=callback,\n method=method,\n headers=headers,\n body=body,\n cookies=cookies,\n meta=meta,\n encoding=encoding,\n priority=priority,\n dont_filter=dont_filter,\n errback=errback,\n cb_kwargs=cb_kwargs,\n flags=flags,\n )\n\n\nclass _InvalidSelector(ValueError):\n \"\"\"\n Raised when a URL cannot be obtained from a Selector\n \"\"\"\n\n\ndef _url_from_selector(sel):\n # type: (parsel.Selector) -> str\n if isinstance(sel.root, str):\n # e.g. ::attr(href) result\n return strip_html5_whitespace(sel.root)\n if not hasattr(sel.root, 'tag'):\n raise _InvalidSelector(\"Unsupported selector: %s\" % sel)\n if sel.root.tag not in ('a', 'link'):\n raise _InvalidSelector(\"Only <a> and <link> elements are supported; got <%s>\" %\n sel.root.tag)\n href = sel.root.get('href')\n if href is None:\n raise _InvalidSelector(\"<%s> element has no href attribute: %s\" %\n (sel.root.tag, sel))\n return strip_html5_whitespace(href)\n", "path": "scrapy/http/response/text.py"}]}
| 3,241 | 220 |
gh_patches_debug_23289
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-6100
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Speakers are queried by role in the after-event mail cron job, but no such role exists
Currently, speakers are queried under the assumption that there is a role called speaker, but no such role exists.
</issue>
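A note on the likely shape of the fix: rather than looking speakers up through a role that does not exist, they can be fetched from the `Speaker` model directly. A minimal sketch, with column names taken from the patch shown further below (`event_id`, `deleted_at`); it assumes an application context is already set up:

```python
# Sketch only: fetch an event's speakers from the Speaker model instead of
# relying on a non-existent 'speaker' role. Column names mirror the patch
# below; this is not the project's final implementation.
from app.models.speaker import Speaker


def get_event_speakers(event_id):
    """Return active (not soft-deleted) speakers for the given event."""
    return Speaker.query.filter_by(event_id=event_id, deleted_at=None).all()
```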
<code>
[start of app/api/helpers/scheduled_jobs.py]
1 import datetime
2
3 import pytz
4 from dateutil.relativedelta import relativedelta
5
6 from app.api.helpers.db import safe_query, save_to_db
7 from app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \
8 send_followup_email_for_monthly_fee_payment
9 from app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \
10 send_notif_after_event
11 from app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name
12 from app.api.helpers.utilities import monthdelta
13 from app.models import db
14 from app.models.event import Event
15 from app.models.event_invoice import EventInvoice
16 from app.models.order import Order
17 from app.models.session import Session
18 from app.models.ticket import Ticket
19 from app.models.ticket_fee import get_fee
20 from app.settings import get_settings
21
22
23 def send_after_event_mail():
24 from app import current_app as app
25 with app.app_context():
26 events = Event.query.all()
27 upcoming_events = get_upcoming_events()
28 upcoming_event_links = "<ul>"
29 for upcoming_event in upcoming_events:
30 frontend_url = get_settings()['frontend_url']
31 upcoming_event_links += "<li><a href='{}/events/{}'>{}</a></li>" \
32 .format(frontend_url, upcoming_event.id, upcoming_event.name)
33 upcoming_event_links += "</ul>"
34 for event in events:
35 organizers = get_user_event_roles_by_role_name(event.id, 'organizer')
36 speakers = get_user_event_roles_by_role_name(event.id, 'speaker')
37 current_time = datetime.datetime.now(pytz.timezone(event.timezone))
38 time_difference = current_time - event.ends_at
39 time_difference_minutes = (time_difference.days * 24 * 60) + \
40 (time_difference.seconds / 60)
41 if current_time > event.ends_at and time_difference_minutes < 1440:
42 for speaker in speakers:
43 send_email_after_event(speaker.user.email, event.name, upcoming_event_links)
44 send_notif_after_event(speaker.user, event.name)
45 for organizer in organizers:
46 send_email_after_event(organizer.user.email, event.name, upcoming_event_links)
47 send_notif_after_event(organizer.user, event.name)
48
49
50 def change_session_state_on_event_completion():
51 from app import current_app as app
52 with app.app_context():
53 sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\
54 .filter(Event.ends_at < datetime.datetime.now())
55 for session in sessions_to_be_changed:
56 session.state = 'rejected'
57 save_to_db(session, 'Changed {} session state to rejected'.format(session.title))
58
59
60 def send_event_fee_notification():
61 from app import current_app as app
62 with app.app_context():
63 events = Event.query.all()
64 for event in events:
65 latest_invoice = EventInvoice.query.filter_by(
66 event_id=event.id).order_by(EventInvoice.created_at.desc()).first()
67
68 if latest_invoice:
69 orders = Order.query \
70 .filter_by(event_id=event.id) \
71 .filter_by(status='completed') \
72 .filter(Order.completed_at > latest_invoice.created_at).all()
73 else:
74 orders = Order.query.filter_by(
75 event_id=event.id).filter_by(status='completed').all()
76
77 fee_total = 0
78 for order in orders:
79 for order_ticket in order.tickets:
80 ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')
81 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:
82 fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)
83 fee_total += fee
84
85 if fee_total > 0:
86 organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first()
87 new_invoice = EventInvoice(
88 amount=fee_total, event_id=event.id, user_id=organizer.user.id)
89
90 if event.discount_code_id and event.discount_code:
91 r = relativedelta(datetime.utcnow(), event.created_at)
92 if r <= event.discount_code.valid_till:
93 new_invoice.amount = fee_total - \
94 (fee_total * (event.discount_code.value / 100.0))
95 new_invoice.discount_code_id = event.discount_code_id
96
97 save_to_db(new_invoice)
98 prev_month = monthdelta(new_invoice.created_at, 1).strftime(
99 "%b %Y") # Displayed as Aug 2016
100 app_name = get_settings()['app_name']
101 frontend_url = get_settings()['frontend_url']
102 link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)
103 send_email_for_monthly_fee_payment(new_invoice.user.email,
104 event.name,
105 prev_month,
106 new_invoice.amount,
107 app_name,
108 link)
109 send_notif_monthly_fee_payment(new_invoice.user,
110 event.name,
111 prev_month,
112 new_invoice.amount,
113 app_name,
114 link,
115 new_invoice.event_id)
116
117
118 def send_event_fee_notification_followup():
119 from app import current_app as app
120 with app.app_context():
121 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()
122 for incomplete_invoice in incomplete_invoices:
123 if incomplete_invoice.amount > 0:
124 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(
125 "%b %Y") # Displayed as Aug 2016
126 app_name = get_settings()['app_name']
127 frontend_url = get_settings()['frontend_url']
128 link = '{}/invoices/{}'.format(frontend_url,
129 incomplete_invoice.identifier)
130 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,
131 incomplete_invoice.event.name,
132 prev_month,
133 incomplete_invoice.amount,
134 app_name,
135 link)
136 send_followup_notif_monthly_fee_payment(incomplete_invoice.user,
137 incomplete_invoice.event.name,
138 prev_month,
139 incomplete_invoice.amount,
140 app_name,
141 link,
142 incomplete_invoice.event.id)
143
144
145 def expire_pending_tickets():
146 from app import current_app as app
147 with app.app_context():
148 db.session.query(Order).filter(Order.status == 'pending',
149 (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\
150 update({'status': 'expired'})
151 db.session.commit()
152
[end of app/api/helpers/scheduled_jobs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py
--- a/app/api/helpers/scheduled_jobs.py
+++ b/app/api/helpers/scheduled_jobs.py
@@ -14,6 +14,7 @@
from app.models.event import Event
from app.models.event_invoice import EventInvoice
from app.models.order import Order
+from app.models.speaker import Speaker
from app.models.session import Session
from app.models.ticket import Ticket
from app.models.ticket_fee import get_fee
@@ -33,7 +34,7 @@
upcoming_event_links += "</ul>"
for event in events:
organizers = get_user_event_roles_by_role_name(event.id, 'organizer')
- speakers = get_user_event_roles_by_role_name(event.id, 'speaker')
+ speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()
current_time = datetime.datetime.now(pytz.timezone(event.timezone))
time_difference = current_time - event.ends_at
time_difference_minutes = (time_difference.days * 24 * 60) + \
|
{"golden_diff": "diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py\n--- a/app/api/helpers/scheduled_jobs.py\n+++ b/app/api/helpers/scheduled_jobs.py\n@@ -14,6 +14,7 @@\n from app.models.event import Event\n from app.models.event_invoice import EventInvoice\n from app.models.order import Order\n+from app.models.speaker import Speaker\n from app.models.session import Session\n from app.models.ticket import Ticket\n from app.models.ticket_fee import get_fee\n@@ -33,7 +34,7 @@\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n- speakers = get_user_event_roles_by_role_name(event.id, 'speaker')\n+ speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n", "issue": "Speakers are queried by role in after event mail cron job but no such role exists\nCurrently, speakers are being queried under the assumption that there is a role called speaker, no such role exists.\n", "before_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import get_fee\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = get_user_event_roles_by_role_name(event.id, 'speaker')\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n 
.filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=organizer.user.id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n", "path": "app/api/helpers/scheduled_jobs.py"}]}
| 2,285 | 231 |
gh_patches_debug_13464
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-3285
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LanguageError: IOError: [Errno 2] No translation file found for domain: 'ckan'
Hi,
We are noticing that some 500 errors are generated when /e or /n is passed instead of /en.
Passing other letters such as /a, /b, or /c triggers a 404.
### CKAN Version if known (or site URL)
Ckan 2.3.X - 2.5.2+
### Please describe the expected behaviour
http://demo.ckan.org/en = 200
http://demo.ckan.org/e = 404
http://demo.ckan.org/n = 404
### Please describe the actual behaviour
http://demo.ckan.org/en = 200
http://demo.ckan.org/e = 500
http://demo.ckan.org/n = 500
### What steps can be taken to reproduce the issue?
Pass /e or /n in any CKAN instance URL to trigger a 500 internal server error.
http://demo.ckan.org/en/dataset = 200
http://demo.ckan.org/e/dataset = 500
</issue>
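The behaviour is consistent with a string being handed to `set()` in the locale handling: `set('en')` yields the individual characters `'e'` and `'n'`, so those one-letter paths look like known locales (hence no 404) but have no translation files (hence the 500). A minimal sketch of the suspected pitfall, independent of CKAN:

```python
# Illustration of the suspected root cause: set() over a string iterates
# its characters, while set() over a one-element list keeps the locale code.
locale_default = "en"

as_characters = set(locale_default)    # {'e', 'n'} -- bogus "locales"
as_locale = set([locale_default])      # {'en'}     -- what was intended

print(as_characters)
print(as_locale)
```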
<code>
[start of ckan/lib/i18n.py]
1 # encoding: utf-8
2
3 import os
4
5 from babel import Locale
6 from babel.core import (LOCALE_ALIASES,
7 get_locale_identifier,
8 UnknownLocaleError)
9 from babel.support import Translations
10 from paste.deploy.converters import aslist
11 from pylons import i18n
12 import pylons
13
14
15 from ckan.common import config
16 import ckan.i18n
17 from ckan.plugins import PluginImplementations
18 from ckan.plugins.interfaces import ITranslation
19
20 # Default Portuguese language to Brazilian territory, since
21 # we don't have a Portuguese territory translation currently.
22 LOCALE_ALIASES['pt'] = 'pt_BR'
23
24
25 def get_locales_from_config():
26 ''' despite the name of this function it gets the locales defined by
27 the config AND also the locals available subject to the config. '''
28 locales_offered = config.get('ckan.locales_offered', '').split()
29 filtered_out = config.get('ckan.locales_filtered_out', '').split()
30 locale_default = config.get('ckan.locale_default', 'en')
31 locale_order = config.get('ckan.locale_order', '').split()
32 known_locales = get_locales()
33 all_locales = (set(known_locales) |
34 set(locales_offered) |
35 set(locale_order) |
36 set(locale_default))
37 all_locales -= set(filtered_out)
38 return all_locales
39
40
41 def _get_locales():
42 # FIXME this wants cleaning up and merging with get_locales_from_config()
43 assert not config.get('lang'), \
44 ('"lang" config option not supported - please use ckan.locale_default '
45 'instead.')
46 locales_offered = config.get('ckan.locales_offered', '').split()
47 filtered_out = config.get('ckan.locales_filtered_out', '').split()
48 locale_default = config.get('ckan.locale_default', 'en')
49 locale_order = config.get('ckan.locale_order', '').split()
50
51 locales = ['en']
52 if config.get('ckan.i18n_directory'):
53 i18n_path = os.path.join(config.get('ckan.i18n_directory'), 'i18n')
54 else:
55 i18n_path = os.path.dirname(ckan.i18n.__file__)
56
57 # For every file in the ckan i18n directory see if babel can understand
58 # the locale. If yes, add it to the available locales
59 for locale in os.listdir(i18n_path):
60 try:
61 Locale.parse(locale)
62 locales.append(locale)
63 except (ValueError, UnknownLocaleError):
64 # Babel does not know how to make a locale out of this.
65 # This is fine since we are passing all files in the
66 # ckan.i18n_directory here which e.g. includes the __init__.py
67 pass
68
69 assert locale_default in locales, \
70 'default language "%s" not available' % locale_default
71
72 locale_list = []
73 for locale in locales:
74 # no duplicates
75 if locale in locale_list:
76 continue
77 # if offered locales then check locale is offered
78 if locales_offered and locale not in locales_offered:
79 continue
80 # remove if filtered out
81 if locale in filtered_out:
82 continue
83 # ignore the default as it will be added first
84 if locale == locale_default:
85 continue
86 locale_list.append(locale)
87 # order the list if specified
88 ordered_list = [locale_default]
89 for locale in locale_order:
90 if locale in locale_list:
91 ordered_list.append(locale)
92 # added so remove from our list
93 locale_list.remove(locale)
94 # add any remaining locales not ordered
95 ordered_list += locale_list
96
97 return ordered_list
98
99 available_locales = None
100 locales = None
101 locales_dict = None
102 _non_translated_locals = None
103
104
105 def get_locales():
106 ''' Get list of available locales
107 e.g. [ 'en', 'de', ... ]
108 '''
109 global locales
110 if not locales:
111 locales = _get_locales()
112 return locales
113
114
115 def non_translated_locals():
116 ''' These are the locales that are available but for which there are
117 no translations. returns a list like ['en', 'de', ...] '''
118 global _non_translated_locals
119 if not _non_translated_locals:
120 locales = config.get('ckan.locale_order', '').split()
121 _non_translated_locals = [x for x in locales if x not in get_locales()]
122 return _non_translated_locals
123
124
125 def get_locales_dict():
126 ''' Get a dict of the available locales
127 e.g. { 'en' : Locale('en'), 'de' : Locale('de'), ... } '''
128 global locales_dict
129 if not locales_dict:
130 locales = _get_locales()
131 locales_dict = {}
132 for locale in locales:
133 locales_dict[str(locale)] = Locale.parse(locale)
134 return locales_dict
135
136
137 def get_available_locales():
138 ''' Get a list of the available locales
139 e.g. [ Locale('en'), Locale('de'), ... ] '''
140 global available_locales
141 if not available_locales:
142 available_locales = []
143 for locale in get_locales():
144 # Add the short names for the locales. This equals the filename
145 # of the ckan translation files as opposed to the long name
146 # that includes the script which is generated by babel
147 # so e.g. `zn_CH` instead of `zn_Hans_CH` this is needed
148 # to properly construct urls with url_for
149 parsed_locale = Locale.parse(locale)
150 parsed_locale.short_name = locale
151
152 # Add the full identifier (eg `pt_BR`) to the locale classes,
153 # as it does not offer a way of accessing it directly
154 parsed_locale.identifier = \
155 get_identifier_from_locale_class(parsed_locale)
156 available_locales.append(parsed_locale)
157 return available_locales
158
159
160 def get_identifier_from_locale_class(locale):
161 return get_locale_identifier(
162 (locale.language,
163 locale.territory,
164 locale.script,
165 locale.variant))
166
167
168 def _set_lang(lang):
169 ''' Allows a custom i18n directory to be specified.
170 Creates a fake config file to pass to pylons.i18n.set_lang, which
171 sets the Pylons root path to desired i18n_directory.
172 This is needed as Pylons will only look for an i18n directory in
173 the application root.'''
174 if config.get('ckan.i18n_directory'):
175 fake_config = {'pylons.paths': {'root': config['ckan.i18n_directory']},
176 'pylons.package': config['pylons.package']}
177 i18n.set_lang(lang, config=fake_config, class_=Translations)
178 else:
179 i18n.set_lang(lang, class_=Translations)
180
181
182 def handle_request(request, tmpl_context):
183 ''' Set the language for the request '''
184 lang = request.environ.get('CKAN_LANG') or \
185 config.get('ckan.locale_default', 'en')
186 if lang != 'en':
187 set_lang(lang)
188
189 for plugin in PluginImplementations(ITranslation):
190 if lang in plugin.i18n_locales():
191 _add_extra_translations(plugin.i18n_directory(), lang,
192 plugin.i18n_domain())
193
194 extra_directory = config.get('ckan.i18n.extra_directory')
195 extra_domain = config.get('ckan.i18n.extra_gettext_domain')
196 extra_locales = aslist(config.get('ckan.i18n.extra_locales'))
197 if extra_directory and extra_domain and extra_locales:
198 if lang in extra_locales:
199 _add_extra_translations(extra_directory, lang, extra_domain)
200
201 tmpl_context.language = lang
202 return lang
203
204
205 def _add_extra_translations(dirname, locales, domain):
206 translator = Translations.load(dirname=dirname, locales=locales,
207 domain=domain)
208 try:
209 pylons.translator.merge(translator)
210 except AttributeError:
211 # this occurs when an extension has 'en' translations that
212 # replace the default strings. As set_lang has not been run,
213 # pylons.translation is the NullTranslation, so we have to
214 # replace the StackedObjectProxy ourselves manually.
215 environ = pylons.request.environ
216 environ['pylons.pylons'].translator = translator
217 if 'paste.registry' in environ:
218 environ['paste.registry'].replace(pylons.translator,
219 translator)
220
221
222 def get_lang():
223 ''' Returns the current language. Based on babel.i18n.get_lang but
224 works when set_lang has not been run (i.e. still in English). '''
225 langs = i18n.get_lang()
226 if langs:
227 return langs[0]
228 else:
229 return 'en'
230
231
232 def set_lang(language_code):
233 ''' Wrapper to pylons call '''
234 if language_code in non_translated_locals():
235 language_code = config.get('ckan.locale_default', 'en')
236 if language_code != 'en':
237 _set_lang(language_code)
238
[end of ckan/lib/i18n.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckan/lib/i18n.py b/ckan/lib/i18n.py
--- a/ckan/lib/i18n.py
+++ b/ckan/lib/i18n.py
@@ -27,8 +27,9 @@
the config AND also the locals available subject to the config. '''
locales_offered = config.get('ckan.locales_offered', '').split()
filtered_out = config.get('ckan.locales_filtered_out', '').split()
- locale_default = config.get('ckan.locale_default', 'en')
+ locale_default = [config.get('ckan.locale_default', 'en')]
locale_order = config.get('ckan.locale_order', '').split()
+
known_locales = get_locales()
all_locales = (set(known_locales) |
set(locales_offered) |
|
{"golden_diff": "diff --git a/ckan/lib/i18n.py b/ckan/lib/i18n.py\n--- a/ckan/lib/i18n.py\n+++ b/ckan/lib/i18n.py\n@@ -27,8 +27,9 @@\n the config AND also the locals available subject to the config. '''\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n- locale_default = config.get('ckan.locale_default', 'en')\n+ locale_default = [config.get('ckan.locale_default', 'en')]\n locale_order = config.get('ckan.locale_order', '').split()\n+\n known_locales = get_locales()\n all_locales = (set(known_locales) |\n set(locales_offered) |\n", "issue": "LanguageError: IOError: [Errno 2] No translation file found for domain: 'ckan'\nHi, \n\nWe are noticing some 500 errors are being generated when /e or /n are being passed instead of /en\n\nPassing other letters such as /a /b /c will trigger a 404\n### CKAN Version if known (or site URL)\n\nCkan 2.3.X - 2.5.2+\n### Please describe the expected behaviour\n\nhttp://demo.ckan.org/en = 200\nhttp://demo.ckan.org/e = 404\nhttp://demo.ckan.org/n = 404\n### Please describe the actual behaviour\n\nhttp://demo.ckan.org/en = 200\nhttp://demo.ckan.org/e = 500\nhttp://demo.ckan.org/n = 500\n### What steps can be taken to reproduce the issue?\n\npass /e or /n in any ckan instance url and you can trigger a 500 internal server error.\n\nhttp://demo.ckan.org/en/dataset = 200\n\nhttp://demo.ckan.org/e/dataset = 500\n\n", "before_files": [{"content": "# encoding: utf-8\n\nimport os\n\nfrom babel import Locale\nfrom babel.core import (LOCALE_ALIASES,\n get_locale_identifier,\n UnknownLocaleError)\nfrom babel.support import Translations\nfrom paste.deploy.converters import aslist\nfrom pylons import i18n\nimport pylons\n\n\nfrom ckan.common import config\nimport ckan.i18n\nfrom ckan.plugins import PluginImplementations\nfrom ckan.plugins.interfaces import ITranslation\n\n# Default Portuguese language to Brazilian territory, since\n# we don't have a Portuguese territory translation currently.\nLOCALE_ALIASES['pt'] = 'pt_BR'\n\n\ndef get_locales_from_config():\n ''' despite the name of this function it gets the locales defined by\n the config AND also the locals available subject to the config. '''\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n locale_default = config.get('ckan.locale_default', 'en')\n locale_order = config.get('ckan.locale_order', '').split()\n known_locales = get_locales()\n all_locales = (set(known_locales) |\n set(locales_offered) |\n set(locale_order) |\n set(locale_default))\n all_locales -= set(filtered_out)\n return all_locales\n\n\ndef _get_locales():\n # FIXME this wants cleaning up and merging with get_locales_from_config()\n assert not config.get('lang'), \\\n ('\"lang\" config option not supported - please use ckan.locale_default '\n 'instead.')\n locales_offered = config.get('ckan.locales_offered', '').split()\n filtered_out = config.get('ckan.locales_filtered_out', '').split()\n locale_default = config.get('ckan.locale_default', 'en')\n locale_order = config.get('ckan.locale_order', '').split()\n\n locales = ['en']\n if config.get('ckan.i18n_directory'):\n i18n_path = os.path.join(config.get('ckan.i18n_directory'), 'i18n')\n else:\n i18n_path = os.path.dirname(ckan.i18n.__file__)\n\n # For every file in the ckan i18n directory see if babel can understand\n # the locale. 
If yes, add it to the available locales\n for locale in os.listdir(i18n_path):\n try:\n Locale.parse(locale)\n locales.append(locale)\n except (ValueError, UnknownLocaleError):\n # Babel does not know how to make a locale out of this.\n # This is fine since we are passing all files in the\n # ckan.i18n_directory here which e.g. includes the __init__.py\n pass\n\n assert locale_default in locales, \\\n 'default language \"%s\" not available' % locale_default\n\n locale_list = []\n for locale in locales:\n # no duplicates\n if locale in locale_list:\n continue\n # if offered locales then check locale is offered\n if locales_offered and locale not in locales_offered:\n continue\n # remove if filtered out\n if locale in filtered_out:\n continue\n # ignore the default as it will be added first\n if locale == locale_default:\n continue\n locale_list.append(locale)\n # order the list if specified\n ordered_list = [locale_default]\n for locale in locale_order:\n if locale in locale_list:\n ordered_list.append(locale)\n # added so remove from our list\n locale_list.remove(locale)\n # add any remaining locales not ordered\n ordered_list += locale_list\n\n return ordered_list\n\navailable_locales = None\nlocales = None\nlocales_dict = None\n_non_translated_locals = None\n\n\ndef get_locales():\n ''' Get list of available locales\n e.g. [ 'en', 'de', ... ]\n '''\n global locales\n if not locales:\n locales = _get_locales()\n return locales\n\n\ndef non_translated_locals():\n ''' These are the locales that are available but for which there are\n no translations. returns a list like ['en', 'de', ...] '''\n global _non_translated_locals\n if not _non_translated_locals:\n locales = config.get('ckan.locale_order', '').split()\n _non_translated_locals = [x for x in locales if x not in get_locales()]\n return _non_translated_locals\n\n\ndef get_locales_dict():\n ''' Get a dict of the available locales\n e.g. { 'en' : Locale('en'), 'de' : Locale('de'), ... } '''\n global locales_dict\n if not locales_dict:\n locales = _get_locales()\n locales_dict = {}\n for locale in locales:\n locales_dict[str(locale)] = Locale.parse(locale)\n return locales_dict\n\n\ndef get_available_locales():\n ''' Get a list of the available locales\n e.g. [ Locale('en'), Locale('de'), ... ] '''\n global available_locales\n if not available_locales:\n available_locales = []\n for locale in get_locales():\n # Add the short names for the locales. This equals the filename\n # of the ckan translation files as opposed to the long name\n # that includes the script which is generated by babel\n # so e.g. 
`zn_CH` instead of `zn_Hans_CH` this is needed\n # to properly construct urls with url_for\n parsed_locale = Locale.parse(locale)\n parsed_locale.short_name = locale\n\n # Add the full identifier (eg `pt_BR`) to the locale classes,\n # as it does not offer a way of accessing it directly\n parsed_locale.identifier = \\\n get_identifier_from_locale_class(parsed_locale)\n available_locales.append(parsed_locale)\n return available_locales\n\n\ndef get_identifier_from_locale_class(locale):\n return get_locale_identifier(\n (locale.language,\n locale.territory,\n locale.script,\n locale.variant))\n\n\ndef _set_lang(lang):\n ''' Allows a custom i18n directory to be specified.\n Creates a fake config file to pass to pylons.i18n.set_lang, which\n sets the Pylons root path to desired i18n_directory.\n This is needed as Pylons will only look for an i18n directory in\n the application root.'''\n if config.get('ckan.i18n_directory'):\n fake_config = {'pylons.paths': {'root': config['ckan.i18n_directory']},\n 'pylons.package': config['pylons.package']}\n i18n.set_lang(lang, config=fake_config, class_=Translations)\n else:\n i18n.set_lang(lang, class_=Translations)\n\n\ndef handle_request(request, tmpl_context):\n ''' Set the language for the request '''\n lang = request.environ.get('CKAN_LANG') or \\\n config.get('ckan.locale_default', 'en')\n if lang != 'en':\n set_lang(lang)\n\n for plugin in PluginImplementations(ITranslation):\n if lang in plugin.i18n_locales():\n _add_extra_translations(plugin.i18n_directory(), lang,\n plugin.i18n_domain())\n\n extra_directory = config.get('ckan.i18n.extra_directory')\n extra_domain = config.get('ckan.i18n.extra_gettext_domain')\n extra_locales = aslist(config.get('ckan.i18n.extra_locales'))\n if extra_directory and extra_domain and extra_locales:\n if lang in extra_locales:\n _add_extra_translations(extra_directory, lang, extra_domain)\n\n tmpl_context.language = lang\n return lang\n\n\ndef _add_extra_translations(dirname, locales, domain):\n translator = Translations.load(dirname=dirname, locales=locales,\n domain=domain)\n try:\n pylons.translator.merge(translator)\n except AttributeError:\n # this occurs when an extension has 'en' translations that\n # replace the default strings. As set_lang has not been run,\n # pylons.translation is the NullTranslation, so we have to\n # replace the StackedObjectProxy ourselves manually.\n environ = pylons.request.environ\n environ['pylons.pylons'].translator = translator\n if 'paste.registry' in environ:\n environ['paste.registry'].replace(pylons.translator,\n translator)\n\n\ndef get_lang():\n ''' Returns the current language. Based on babel.i18n.get_lang but\n works when set_lang has not been run (i.e. still in English). '''\n langs = i18n.get_lang()\n if langs:\n return langs[0]\n else:\n return 'en'\n\n\ndef set_lang(language_code):\n ''' Wrapper to pylons call '''\n if language_code in non_translated_locals():\n language_code = config.get('ckan.locale_default', 'en')\n if language_code != 'en':\n _set_lang(language_code)\n", "path": "ckan/lib/i18n.py"}]}
| 3,379 | 188 |
gh_patches_debug_30257
|
rasdani/github-patches
|
git_diff
|
SeldonIO__MLServer-317
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gRPC: Received message larger than max (11059277 vs. 4194304)
We are using MLServer (gRPC) to serve a custom PyTorch ML model and are sending images of size 640x640x3. Because of the size, we get the error below:
> status = StatusCode.RESOURCE_EXHAUSTED
> details = "Received message larger than max (11059277 vs. 4194304)"
> debug_error_string = "{"created":"@1630074151.799330000","description":"Error received from peer ipv4:0.0.0.0:28081","file":"src/core/lib/surface/call.cc","file_line":1070,"grpc_message":"Received message larger than max (11059277 vs. 4194304)","grpc_status":8}"
Setting the following on the client side did not work:
```
maxMsgLength = 20 * 1024 * 1024
ch_options = [('grpc.max_message_length', maxMsgLength),
('grpc.max_send_message_length', maxMsgLength),
('grpc.max_receive_message_length', maxMsgLength)]
channel = grpc.insecure_channel(f'{host}:{port}', options=ch_options)
```
However, setting it on the server side worked:
```
def _create_server(self):
self._inference_servicer = InferenceServicer(self._data_plane)
self._model_repository_servicer = ModelRepositoryServicer(
self._model_repository_handlers
)
channel_opt = [('grpc.max_send_message_length', 20 * 1024 * 1024), ('grpc.max_receive_message_length', 20 * 1024 * 1024)]
self._server = aio.server(
ThreadPoolExecutor(max_workers=self._settings.grpc_workers), options = channel_opt
)
add_GRPCInferenceServiceServicer_to_server(
self._inference_servicer, self._server
)
```
But this is not a proper solution, since our MLServer runs in Docker and is installed via pip.
This should be part of the server config in MLServer.
</issue>
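A minimal sketch of how the limit could be driven by configuration instead of being hard-coded (the parameter name and wiring here are assumptions, not MLServer's final API; only `grpc.aio` calls already used in this codebase appear):

```python
# Hypothetical sketch: build the gRPC server with channel options derived
# from settings rather than hard-coded values.
from concurrent.futures import ThreadPoolExecutor
from grpc import aio


def create_server(settings, max_message_length=None):
    options = []
    if max_message_length is not None:
        options += [
            ("grpc.max_send_message_length", max_message_length),
            ("grpc.max_receive_message_length", max_message_length),
        ]
    return aio.server(
        ThreadPoolExecutor(max_workers=settings.grpc_workers),
        options=options,
    )
```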
<code>
[start of mlserver/grpc/server.py]
1 from grpc import aio
2 from concurrent.futures import ThreadPoolExecutor
3
4 from ..handlers import DataPlane, ModelRepositoryHandlers
5 from ..settings import Settings
6
7 from .servicers import InferenceServicer, ModelRepositoryServicer
8 from .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server
9 from .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server
10
11
12 class GRPCServer:
13 def __init__(
14 self,
15 settings: Settings,
16 data_plane: DataPlane,
17 model_repository_handlers: ModelRepositoryHandlers,
18 ):
19 self._settings = settings
20 self._data_plane = data_plane
21 self._model_repository_handlers = model_repository_handlers
22
23 def _create_server(self):
24 self._inference_servicer = InferenceServicer(self._data_plane)
25 self._model_repository_servicer = ModelRepositoryServicer(
26 self._model_repository_handlers
27 )
28 self._server = aio.server(
29 ThreadPoolExecutor(max_workers=self._settings.grpc_workers)
30 )
31
32 add_GRPCInferenceServiceServicer_to_server(
33 self._inference_servicer, self._server
34 )
35 add_ModelRepositoryServiceServicer_to_server(
36 self._model_repository_servicer, self._server
37 )
38
39 self._server.add_insecure_port(
40 f"{self._settings.host}:{self._settings.grpc_port}"
41 )
42
43 return self._server
44
45 async def start(self):
46 self._create_server()
47
48 await self._server.start()
49 await self._server.wait_for_termination()
50
51 async def stop(self):
52 # TODO: Read from config
53 await self._server.stop(grace=5)
54
[end of mlserver/grpc/server.py]
[start of mlserver/settings.py]
1 from typing import List, Optional
2 from pydantic import BaseSettings, PyObject
3
4 from .version import __version__
5 from .types import MetadataTensor
6
7 ENV_PREFIX_SETTINGS = "MLSERVER_"
8 ENV_PREFIX_MODEL_SETTINGS = "MLSERVER_MODEL_"
9
10
11 class Settings(BaseSettings):
12 class Config:
13 env_prefix = ENV_PREFIX_SETTINGS
14
15 debug: bool = True
16
17 # Model repository folder
18 model_repository_root: str = "."
19 load_models_at_startup: bool = True
20
21 # Server metadata
22 server_name: str = "mlserver"
23 server_version: str = __version__
24 extensions: List[str] = []
25
26 # Server settings
27 host: str = "0.0.0.0"
28 http_port: int = 8080
29 grpc_port: int = 8081
30 grpc_workers: int = 10
31
32
33 class ModelParameters(BaseSettings):
34 """
35 Parameters that apply only to a particular instance of a model.
36 This can include things like model weights.
37 The main difference with respect to ModelSettings is that parameters can
38 change on each instance (e.g. each version) of the model.
39 """
40
41 class Config:
42 env_prefix = ENV_PREFIX_MODEL_SETTINGS
43
44 uri: Optional[str] = None
45 version: Optional[str] = None
46 format: Optional[str] = None
47 extra: Optional[dict] = {}
48
49
50 class ModelSettings(BaseSettings):
51 class Config:
52 env_prefix = ENV_PREFIX_MODEL_SETTINGS
53
54 name: str = ""
55
56 # Model metadata
57 platform: str = ""
58 versions: Optional[List[str]] = []
59 inputs: Optional[List[MetadataTensor]] = []
60 outputs: Optional[List[MetadataTensor]] = []
61
62 # Parallel settings
63 parallel_workers: Optional[int] = 4
64
65 # Custom model class implementation
66 implementation: PyObject = "mlserver.model.MLModel" # type: ignore
67
68 # Model parameters are meant to be set directly by the MLServer runtime.
69 # However, it's also possible to override them manually.
70 parameters: Optional[ModelParameters] = None
71
[end of mlserver/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlserver/grpc/server.py b/mlserver/grpc/server.py
--- a/mlserver/grpc/server.py
+++ b/mlserver/grpc/server.py
@@ -1,5 +1,6 @@
from grpc import aio
from concurrent.futures import ThreadPoolExecutor
+from typing import Any, List, Tuple
from ..handlers import DataPlane, ModelRepositoryHandlers
from ..settings import Settings
@@ -25,8 +26,10 @@
self._model_repository_servicer = ModelRepositoryServicer(
self._model_repository_handlers
)
+
self._server = aio.server(
- ThreadPoolExecutor(max_workers=self._settings.grpc_workers)
+ ThreadPoolExecutor(max_workers=self._settings.grpc_workers),
+ options=self._get_options(),
)
add_GRPCInferenceServiceServicer_to_server(
@@ -42,6 +45,19 @@
return self._server
+ def _get_options(self) -> List[Tuple[str, Any]]:
+ options = []
+
+ max_message_length = self._settings.grpc_max_message_length
+ if max_message_length is not None:
+ options += [
+ ("grpc.max_message_length", max_message_length),
+ ("grpc.max_send_message_length", max_message_length),
+ ("grpc.max_receive_message_length", max_message_length),
+ ]
+
+ return options
+
async def start(self):
self._create_server()
diff --git a/mlserver/settings.py b/mlserver/settings.py
--- a/mlserver/settings.py
+++ b/mlserver/settings.py
@@ -28,6 +28,7 @@
http_port: int = 8080
grpc_port: int = 8081
grpc_workers: int = 10
+ grpc_max_message_length: Optional[int] = None
class ModelParameters(BaseSettings):
|
{"golden_diff": "diff --git a/mlserver/grpc/server.py b/mlserver/grpc/server.py\n--- a/mlserver/grpc/server.py\n+++ b/mlserver/grpc/server.py\n@@ -1,5 +1,6 @@\n from grpc import aio\n from concurrent.futures import ThreadPoolExecutor\n+from typing import Any, List, Tuple\n \n from ..handlers import DataPlane, ModelRepositoryHandlers\n from ..settings import Settings\n@@ -25,8 +26,10 @@\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n+\n self._server = aio.server(\n- ThreadPoolExecutor(max_workers=self._settings.grpc_workers)\n+ ThreadPoolExecutor(max_workers=self._settings.grpc_workers),\n+ options=self._get_options(),\n )\n \n add_GRPCInferenceServiceServicer_to_server(\n@@ -42,6 +45,19 @@\n \n return self._server\n \n+ def _get_options(self) -> List[Tuple[str, Any]]:\n+ options = []\n+\n+ max_message_length = self._settings.grpc_max_message_length\n+ if max_message_length is not None:\n+ options += [\n+ (\"grpc.max_message_length\", max_message_length),\n+ (\"grpc.max_send_message_length\", max_message_length),\n+ (\"grpc.max_receive_message_length\", max_message_length),\n+ ]\n+\n+ return options\n+\n async def start(self):\n self._create_server()\n \ndiff --git a/mlserver/settings.py b/mlserver/settings.py\n--- a/mlserver/settings.py\n+++ b/mlserver/settings.py\n@@ -28,6 +28,7 @@\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n+ grpc_max_message_length: Optional[int] = None\n \n \n class ModelParameters(BaseSettings):\n", "issue": "gRPC: Received message larger than max (11059277 vs. 4194304)\nWe are using mlserver (gRPC) for serving custom PyTorch ML Model and are sending images of size 640x640x3 . But because of the size we are getting the below error:\r\n\r\n> status = StatusCode.RESOURCE_EXHAUSTED\r\n> \tdetails = \"Received message larger than max (11059277 vs. 4194304)\"\r\n> \tdebug_error_string = \"{\"created\":\"@1630074151.799330000\",\"description\":\"Error received from peer ipv4:0.0.0.0:28081\",\"file\":\"src/core/lib/surface/call.cc\",\"file_line\":1070,\"grpc_message\":\"Received message larger than max (11059277 vs. 
4194304)\",\"grpc_status\":8}\"\r\n\r\nSetting the following on client side did not work:\r\n```\r\nmaxMsgLength = 20 * 1024 * 1024\r\nch_options = [('grpc.max_message_length', maxMsgLength),\r\n ('grpc.max_send_message_length', maxMsgLength),\r\n ('grpc.max_receive_message_length', maxMsgLength)]\r\nchannel = grpc.insecure_channel(f'{host}:{port}', options=ch_options)\r\n\r\n```\r\nHowever setting it on server side worked:\r\n```\r\ndef _create_server(self):\r\n self._inference_servicer = InferenceServicer(self._data_plane)\r\n self._model_repository_servicer = ModelRepositoryServicer(\r\n self._model_repository_handlers\r\n )\r\n channel_opt = [('grpc.max_send_message_length', 20 * 1024 * 1024), ('grpc.max_receive_message_length', 20 * 1024 * 1024)]\r\n self._server = aio.server(\r\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers), options = channel_opt\r\n )\r\n\r\n add_GRPCInferenceServiceServicer_to_server(\r\n self._inference_servicer, self._server\r\n )\r\n\r\n```\r\nBut this is not a proper solution, since our mlserver runs in a docker and does a pip install.\r\nThis should be part of the server config in MLServer.\n", "before_files": [{"content": "from grpc import aio\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\nfrom ..settings import Settings\n\nfrom .servicers import InferenceServicer, ModelRepositoryServicer\nfrom .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server\nfrom .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server\n\n\nclass GRPCServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n\n def _create_server(self):\n self._inference_servicer = InferenceServicer(self._data_plane)\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n self._server = aio.server(\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers)\n )\n\n add_GRPCInferenceServiceServicer_to_server(\n self._inference_servicer, self._server\n )\n add_ModelRepositoryServiceServicer_to_server(\n self._model_repository_servicer, self._server\n )\n\n self._server.add_insecure_port(\n f\"{self._settings.host}:{self._settings.grpc_port}\"\n )\n\n return self._server\n\n async def start(self):\n self._create_server()\n\n await self._server.start()\n await self._server.wait_for_termination()\n\n async def stop(self):\n # TODO: Read from config\n await self._server.stop(grace=5)\n", "path": "mlserver/grpc/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository folder\n model_repository_root: str = \".\"\n load_models_at_startup: bool = True\n\n # Server metadata\n server_name: str = \"mlserver\"\n server_version: str = __version__\n extensions: List[str] = []\n\n # Server settings\n host: str = \"0.0.0.0\"\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can 
include things like model weights.\n The main difference with respect to ModelSettings is that parameters can\n change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n version: Optional[str] = None\n format: Optional[str] = None\n extra: Optional[dict] = {}\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n name: str = \"\"\n\n # Model metadata\n platform: str = \"\"\n versions: Optional[List[str]] = []\n inputs: Optional[List[MetadataTensor]] = []\n outputs: Optional[List[MetadataTensor]] = []\n\n # Parallel settings\n parallel_workers: Optional[int] = 4\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}]}
| 2,110 | 399 |
gh_patches_debug_9670
|
rasdani/github-patches
|
git_diff
|
paperless-ngx__paperless-ngx-1500
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Internal server error @ fresh install
### Description
Get "Internal server error" with no description after a fresh install. Have I missed a step, or is this a bug?
### Steps to reproduce
...
### Webserver logs
```bash
Nothing shows up using 'docker-compose logs -f'
webserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47)
webserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6
```
### Paperless-ngx version
1.8.0
### Host OS
CentOS 7.9 -fully patched with latest patches
### Installation method
Docker - official image
### Browser
Firefox
### Configuration changes
none
### Other
...
</issue>
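For what it's worth, a WORKER TIMEOUT with no traceback is a known symptom of gunicorn's heartbeat file blocking on a slow temporary directory (see the gunicorn FAQ on blocking `os.fchmod`). A sketch of the usual mitigation in `gunicorn.conf.py`, offered as a suggestion rather than a confirmed root cause:

```python
# Suggested gunicorn.conf.py additions (see gunicorn's FAQ on blocking
# os.fchmod). Sketch only.

# Load the application before forking workers.
preload_app = True

# Keep gunicorn's worker heartbeat file on a tmpfs so os.fchmod cannot block.
worker_tmp_dir = "/dev/shm"
```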
<code>
[start of gunicorn.conf.py]
1 import os
2
3 bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
4 workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
5 worker_class = "paperless.workers.ConfigurableWorker"
6 timeout = 120
7
8
9 def pre_fork(server, worker):
10 pass
11
12
13 def pre_exec(server):
14 server.log.info("Forked child, re-executing.")
15
16
17 def when_ready(server):
18 server.log.info("Server is ready. Spawning workers")
19
20
21 def worker_int(worker):
22 worker.log.info("worker received INT or QUIT signal")
23
24 ## get traceback info
25 import threading, sys, traceback
26
27 id2name = {th.ident: th.name for th in threading.enumerate()}
28 code = []
29 for threadId, stack in sys._current_frames().items():
30 code.append("\n# Thread: %s(%d)" % (id2name.get(threadId, ""), threadId))
31 for filename, lineno, name, line in traceback.extract_stack(stack):
32 code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
33 if line:
34 code.append(" %s" % (line.strip()))
35 worker.log.debug("\n".join(code))
36
37
38 def worker_abort(worker):
39 worker.log.info("worker received SIGABRT signal")
40
[end of gunicorn.conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gunicorn.conf.py b/gunicorn.conf.py
--- a/gunicorn.conf.py
+++ b/gunicorn.conf.py
@@ -1,9 +1,17 @@
import os
+# See https://docs.gunicorn.org/en/stable/settings.html for
+# explanations of settings
+
bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
+
workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
worker_class = "paperless.workers.ConfigurableWorker"
timeout = 120
+preload_app = True
+
+# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod
+worker_tmp_dir = "/dev/shm"
def pre_fork(server, worker):
|
{"golden_diff": "diff --git a/gunicorn.conf.py b/gunicorn.conf.py\n--- a/gunicorn.conf.py\n+++ b/gunicorn.conf.py\n@@ -1,9 +1,17 @@\n import os\n \n+# See https://docs.gunicorn.org/en/stable/settings.html for\n+# explanations of settings\n+\n bind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n+\n workers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\n worker_class = \"paperless.workers.ConfigurableWorker\"\n timeout = 120\n+preload_app = True\n+\n+# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod\n+worker_tmp_dir = \"/dev/shm\"\n \n \n def pre_fork(server, worker):\n", "issue": "[BUG] Internal server error @ fresh install\n### Description\n\nGet \"Internal server error\" with no description after a fresh install. Have I missed a step, or is this a bug?\n\n### Steps to reproduce\n\n...\n\n### Webserver logs\n\n```bash\nNothing shows up using 'docker-compose logs -f'\r\n\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47)\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6\n```\n\n\n### Paperless-ngx version\n\n1.8.0\n\n### Host OS\n\nCentOS 7.9 -fully patched with latest patches\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nFirefox\n\n### Configuration changes\n\nnone\n\n### Other\n\n...\n", "before_files": [{"content": "import os\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}]}
| 1,139 | 178 |
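Editor's note on the record above (paperless-ngx worker timeout): the golden diff addresses the gunicorn worker timeouts by preloading the app and keeping worker heartbeat files on tmpfs. Below is a minimal sketch of the patched `gunicorn.conf.py` settings reconstructed from that diff; the hook functions from the original file are unchanged and omitted here.

```python
# Minimal sketch of the patched gunicorn.conf.py settings, per the diff above.
# See https://docs.gunicorn.org/en/stable/settings.html for the option meanings.
import os

bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
worker_class = "paperless.workers.ConfigurableWorker"
timeout = 120
preload_app = True

# Keeps worker heartbeat files on tmpfs, avoiding blocking os.fchmod calls
# (https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod).
worker_tmp_dir = "/dev/shm"
```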
gh_patches_debug_39779
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-4477
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Normalise images before handing them to skimage.transform.warp
From @ayshih at https://github.com/sunpy/sunpy/pull/4452#issuecomment-686504683:
> #2041 removed the normalization code and cast everything to float64. However, I believe the removal of the normalization code should not have been done. Scikit-image's documentation is quite clear that it always assumes that floats are in the range of [0, 1]. While warp() does appear to work fine when that assumption is violated, we are currently relying on undefined behavior.
> I suggest that the normalization code be re-inserted.
I believe the work here is to normalise floating point input to `skimage.transform.warp` to the range [0, 1] in `sunpy.image.transform.affine_transform`, and then un-normalise it after the transform.
</issue>
<code>
[start of sunpy/image/transform.py]
1 """
2 Functions for geometrical image transformation and warping.
3 """
4 import numbers
5 import warnings
6
7 import numpy as np
8 import scipy.ndimage.interpolation
9
10 from sunpy.util.exceptions import SunpyUserWarning
11
12 __all__ = ['affine_transform']
13
14
15 def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,
16 recenter=False, missing=0.0, use_scipy=False):
17 """
18 Rotates, shifts and scales an image.
19
20 Will use `skimage.transform.warp` unless scikit-image can't be imported
21 then it will use`scipy.ndimage.affine_transform`.
22
23 Parameters
24 ----------
25 image : `numpy.ndarray`
26 2D image to be rotated.
27 rmatrix : `numpy.ndarray` that is 2x2
28 Linear transformation rotation matrix.
29 order : `int` 0-5, optional
30 Interpolation order to be used, defaults to 3. When using scikit-image this parameter
31 is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).
32 When using scipy it is passed into
33 `scipy.ndimage.affine_transform` where it controls the order of the spline.
34 scale : `float`
35 A scale factor for the image with the default being no scaling.
36 image_center : tuple, optional
37 The point in the image to rotate around (axis of rotation).
38 Defaults to the center of the array.
39 recenter : `bool` or array-like, optional
40 Move the axis of rotation to the center of the array or recenter coords.
41 Defaults to `True` i.e., recenter to the center of the array.
42 missing : `float`, optional
43 The value to replace any missing data after the transformation.
44 use_scipy : `bool`, optional
45 Force use of `scipy.ndimage.affine_transform`.
46 Will set all "NaNs" in image to zero before doing the transform.
47 Defaults to `False`, unless scikit-image can't be imported.
48
49 Returns
50 -------
51 `numpy.ndarray`:
52 New rotated, scaled and translated image.
53
54 Notes
55 -----
56 This algorithm uses an affine transformation as opposed to a polynomial
57 geometrical transformation, which by default is `skimage.transform.warp`.
58 One can specify using `scipy.ndimage.affine_transform` as
59 an alternative affine transformation. The two transformations use different
60 algorithms and thus do not give identical output.
61
62 When using for `skimage.transform.warp` with order >= 4 or using
63 `scipy.ndimage.affine_transform` at all, "NaN" values will
64 replaced with zero prior to rotation. No attempt is made to retain the NaN
65 values.
66
67 Input arrays with integer data are cast to float 64 and can be re-cast using
68 `numpy.ndarray.astype` if desired.
69
70 Although this function is analogous to the IDL's ``rot`` function, it does not
71 use the same algorithm as the IDL ``rot`` function.
72 IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
73 method to calculate the inverse mapping of original to target pixel
74 coordinates. This is a polynomial geometrical transformation.
75 Then optionally it uses a bicubic convolution interpolation
76 algorithm to map the original to target pixel values.
77 """
78 rmatrix = rmatrix / scale
79 array_center = (np.array(image.shape)[::-1] - 1) / 2.0
80
81 # Make sure the image center is an array and is where it's supposed to be
82 if image_center is not None:
83 image_center = np.asanyarray(image_center)
84 else:
85 image_center = array_center
86
87 # Determine center of rotation based on use (or not) of the recenter keyword
88 if recenter:
89 rot_center = array_center
90 else:
91 rot_center = image_center
92
93 displacement = np.dot(rmatrix, rot_center)
94 shift = image_center - displacement
95 if not use_scipy:
96 try:
97 import skimage.transform
98 except ImportError:
99 warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
100 ImportWarning)
101 use_scipy = True
102 if use_scipy:
103 if np.any(np.isnan(image)):
104 warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning)
105 # Transform the image using the scipy affine transform
106 rotated_image = scipy.ndimage.interpolation.affine_transform(
107 np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
108 mode='constant', cval=missing).T
109 else:
110 # Make the rotation matrix 3x3 to include translation of the image
111 skmatrix = np.zeros((3, 3))
112 skmatrix[:2, :2] = rmatrix
113 skmatrix[2, 2] = 1.0
114 skmatrix[:2, 2] = shift
115 tform = skimage.transform.AffineTransform(skmatrix)
116
117 if issubclass(image.dtype.type, numbers.Integral):
118 warnings.warn("Integer input data has been cast to float64, "
119 "which is required for the skikit-image transform.",
120 SunpyUserWarning)
121 adjusted_image = image.astype(np.float64)
122 else:
123 adjusted_image = image.copy()
124 if np.any(np.isnan(adjusted_image)) and order >= 4:
125 warnings.warn("Setting NaNs to 0 for higher-order scikit-image rotation.",
126 SunpyUserWarning)
127 adjusted_image = np.nan_to_num(adjusted_image)
128
129 rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,
130 mode='constant', cval=missing,
131 preserve_range=True)
132
133 return rotated_image
134
[end of sunpy/image/transform.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py
--- a/sunpy/image/transform.py
+++ b/sunpy/image/transform.py
@@ -59,14 +59,16 @@
an alternative affine transformation. The two transformations use different
algorithms and thus do not give identical output.
- When using for `skimage.transform.warp` with order >= 4 or using
- `scipy.ndimage.affine_transform` at all, "NaN" values will
- replaced with zero prior to rotation. No attempt is made to retain the NaN
- values.
+ When using `skimage.transform.warp` with order >= 4 or using
+ `scipy.ndimage.affine_transform` at all, "NaN" values will be replaced with
+ zero prior to rotation. No attempt is made to retain the "NaN" values.
Input arrays with integer data are cast to float 64 and can be re-cast using
`numpy.ndarray.astype` if desired.
+ In the case of `skimage.transform.warp`, the image is normalized to [0, 1]
+ before passing it to the function. It is later rescaled back to the original range.
+
Although this function is analogous to the IDL's ``rot`` function, it does not
use the same algorithm as the IDL ``rot`` function.
IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
@@ -115,8 +117,7 @@
tform = skimage.transform.AffineTransform(skmatrix)
if issubclass(image.dtype.type, numbers.Integral):
- warnings.warn("Integer input data has been cast to float64, "
- "which is required for the skikit-image transform.",
+ warnings.warn("Integer input data has been cast to float64.",
SunpyUserWarning)
adjusted_image = image.astype(np.float64)
else:
@@ -126,8 +127,28 @@
SunpyUserWarning)
adjusted_image = np.nan_to_num(adjusted_image)
+ # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)
+ is_nan_image = np.all(np.isnan(adjusted_image))
+ if is_nan_image:
+ adjusted_missing = missing
+ else:
+ im_min = np.nanmin(adjusted_image)
+ adjusted_image -= im_min
+ im_max = np.nanmax(adjusted_image)
+ if im_max > 0:
+ adjusted_image /= im_max
+ adjusted_missing = (missing - im_min) / im_max
+ else:
+ # The input array is all one value (aside from NaNs), so no scaling is needed
+ adjusted_missing = missing - im_min
+
rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,
- mode='constant', cval=missing,
- preserve_range=True)
+ mode='constant', cval=adjusted_missing)
+
+ # Convert the image back to its original range if it is valid
+ if not is_nan_image:
+ if im_max > 0:
+ rotated_image *= im_max
+ rotated_image += im_min
return rotated_image
|
{"golden_diff": "diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py\n--- a/sunpy/image/transform.py\n+++ b/sunpy/image/transform.py\n@@ -59,14 +59,16 @@\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n \n- When using for `skimage.transform.warp` with order >= 4 or using\n- `scipy.ndimage.affine_transform` at all, \"NaN\" values will\n- replaced with zero prior to rotation. No attempt is made to retain the NaN\n- values.\n+ When using `skimage.transform.warp` with order >= 4 or using\n+ `scipy.ndimage.affine_transform` at all, \"NaN\" values will be replaced with\n+ zero prior to rotation. No attempt is made to retain the \"NaN\" values.\n \n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n \n+ In the case of `skimage.transform.warp`, the image is normalized to [0, 1]\n+ before passing it to the function. It is later rescaled back to the original range.\n+\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n@@ -115,8 +117,7 @@\n tform = skimage.transform.AffineTransform(skmatrix)\n \n if issubclass(image.dtype.type, numbers.Integral):\n- warnings.warn(\"Integer input data has been cast to float64, \"\n- \"which is required for the skikit-image transform.\",\n+ warnings.warn(\"Integer input data has been cast to float64.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n@@ -126,8 +127,28 @@\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n \n+ # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)\n+ is_nan_image = np.all(np.isnan(adjusted_image))\n+ if is_nan_image:\n+ adjusted_missing = missing\n+ else:\n+ im_min = np.nanmin(adjusted_image)\n+ adjusted_image -= im_min\n+ im_max = np.nanmax(adjusted_image)\n+ if im_max > 0:\n+ adjusted_image /= im_max\n+ adjusted_missing = (missing - im_min) / im_max\n+ else:\n+ # The input array is all one value (aside from NaNs), so no scaling is needed\n+ adjusted_missing = missing - im_min\n+\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n- mode='constant', cval=missing,\n- preserve_range=True)\n+ mode='constant', cval=adjusted_missing)\n+\n+ # Convert the image back to its original range if it is valid\n+ if not is_nan_image:\n+ if im_max > 0:\n+ rotated_image *= im_max\n+ rotated_image += im_min\n \n return rotated_image\n", "issue": "Normalise images before handing them to skimage.transform.warp\nFrom @ayshih at https://github.com/sunpy/sunpy/pull/4452#issuecomment-686504683:\r\n\r\n> #2041 removed the normalization code and cast everything to float64. However, I believe the removal of the normalization code should not have been done. Scikit-image's documentation is quite clear that it always assumes that floats are in the range of [0, 1]. 
While warp() does appear to work fine when that assumption is violated, we are currently relying on undefined behavior.\r\n\r\n> I suggest that the normalization code be re-inserted.\r\n\r\nI believe the work here is to normalise floating point input to `skimage.transform.warp` to the range [0, 1] in `sunpy.image.transform.affine_transform`, and then un-normalise it after the transform.\n", "before_files": [{"content": "\"\"\"\nFunctions for geometrical image transformation and warping.\n\"\"\"\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.ndimage.interpolation\n\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['affine_transform']\n\n\ndef affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,\n recenter=False, missing=0.0, use_scipy=False):\n \"\"\"\n Rotates, shifts and scales an image.\n\n Will use `skimage.transform.warp` unless scikit-image can't be imported\n then it will use`scipy.ndimage.affine_transform`.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n 2D image to be rotated.\n rmatrix : `numpy.ndarray` that is 2x2\n Linear transformation rotation matrix.\n order : `int` 0-5, optional\n Interpolation order to be used, defaults to 3. When using scikit-image this parameter\n is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).\n When using scipy it is passed into\n `scipy.ndimage.affine_transform` where it controls the order of the spline.\n scale : `float`\n A scale factor for the image with the default being no scaling.\n image_center : tuple, optional\n The point in the image to rotate around (axis of rotation).\n Defaults to the center of the array.\n recenter : `bool` or array-like, optional\n Move the axis of rotation to the center of the array or recenter coords.\n Defaults to `True` i.e., recenter to the center of the array.\n missing : `float`, optional\n The value to replace any missing data after the transformation.\n use_scipy : `bool`, optional\n Force use of `scipy.ndimage.affine_transform`.\n Will set all \"NaNs\" in image to zero before doing the transform.\n Defaults to `False`, unless scikit-image can't be imported.\n\n Returns\n -------\n `numpy.ndarray`:\n New rotated, scaled and translated image.\n\n Notes\n -----\n This algorithm uses an affine transformation as opposed to a polynomial\n geometrical transformation, which by default is `skimage.transform.warp`.\n One can specify using `scipy.ndimage.affine_transform` as\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n\n When using for `skimage.transform.warp` with order >= 4 or using\n `scipy.ndimage.affine_transform` at all, \"NaN\" values will\n replaced with zero prior to rotation. No attempt is made to retain the NaN\n values.\n\n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n method to calculate the inverse mapping of original to target pixel\n coordinates. 
This is a polynomial geometrical transformation.\n Then optionally it uses a bicubic convolution interpolation\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n array_center = (np.array(image.shape)[::-1] - 1) / 2.0\n\n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n image_center = np.asanyarray(image_center)\n else:\n image_center = array_center\n\n # Determine center of rotation based on use (or not) of the recenter keyword\n if recenter:\n rot_center = array_center\n else:\n rot_center = image_center\n\n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n if not use_scipy:\n try:\n import skimage.transform\n except ImportError:\n warnings.warn(\"scikit-image could not be imported. Image rotation will use scipy\",\n ImportWarning)\n use_scipy = True\n if use_scipy:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n skmatrix[:2, :2] = rmatrix\n skmatrix[2, 2] = 1.0\n skmatrix[:2, 2] = shift\n tform = skimage.transform.AffineTransform(skmatrix)\n\n if issubclass(image.dtype.type, numbers.Integral):\n warnings.warn(\"Integer input data has been cast to float64, \"\n \"which is required for the skikit-image transform.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n adjusted_image = image.copy()\n if np.any(np.isnan(adjusted_image)) and order >= 4:\n warnings.warn(\"Setting NaNs to 0 for higher-order scikit-image rotation.\",\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n mode='constant', cval=missing,\n preserve_range=True)\n\n return rotated_image\n", "path": "sunpy/image/transform.py"}]}
| 2,299 | 749 |
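Editor's note on the record above (sunpy image normalisation): the golden diff rescales the image to [0, 1] before handing it to `skimage.transform.warp`, applies the same transform to `cval`, and restores the original range afterwards. The snippet below is a simplified, self-contained sketch of that pattern; the function name is invented here, NaN-only inputs are not handled, and it is not sunpy's actual implementation.

```python
# Simplified sketch of the normalise -> warp -> rescale pattern from the diff.
# Assumes the image contains at least one finite value; not sunpy's real code.
import numpy as np
import skimage.transform


def warp_normalised(image, tform, order=3, missing=0.0):
    im_min = np.nanmin(image)
    shifted = image - im_min
    im_max = np.nanmax(shifted)
    if im_max > 0:
        scaled = shifted / im_max
        cval = (missing - im_min) / im_max
    else:  # constant image: no scaling needed
        scaled = shifted
        cval = missing - im_min

    warped = skimage.transform.warp(scaled, tform, order=order,
                                    mode="constant", cval=cval)

    # Map the result back to the original data range.
    if im_max > 0:
        warped *= im_max
    warped += im_min
    return warped
```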
gh_patches_debug_3610
|
rasdani/github-patches
|
git_diff
|
google__clusterfuzz-1961
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
js_minimizer is not callable
In the following `run` function of the `JSMinimizer` class, `js_minimizer` is an object and not a function, so it should not be callable.
```
js_tokenizer = AntlrTokenizer(JavaScriptLexer)
js_minimizer = JSMinimizer(
utils.test,
max_threads=thread_count,
tokenizer=js_tokenizer.tokenize,
token_combiner=js_tokenizer.combine,
file_extension=file_extension)
result = line_minimizer.minimize(data)
result = js_minimizer(result)
result = js_minimizer(result)
result = line_minimizer.minimize(result)
```
Location in codebase [here](https://github.com/google/clusterfuzz/blob/master/src/python/bot/minimizer/js_minimizer.py#L187).
Also, should `js_minimizer` be called two times?
I guess `js_minimizer(result)` should be replaced with `js_minimizer.minimize(data)`, but I am not really sure. Can someone please confirm if it is a bug or not?
</issue>
<code>
[start of src/python/bot/minimizer/js_minimizer.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Minimizer used for additional reduction on javascript test cases."""
15 from __future__ import absolute_import
16
17 from builtins import range
18
19 from . import delta_minimizer
20 from . import errors
21 from . import minimizer
22 from . import utils
23
24 from bot.tokenizer.antlr_tokenizer import AntlrTokenizer
25 from bot.tokenizer.grammars.JavaScriptLexer import JavaScriptLexer
26
27
28 def step_back_while(cur_index, condition):
29 """Helper function. Decreases index from cur while condition is satisfied."""
30 while cur_index >= 0 and condition(cur_index):
31 cur_index -= 1
32 return cur_index
33
34
35 class JSMinimizer(minimizer.Minimizer):
36 """Intended as a second-pass minimizer to remove unneeded tokens from JS."""
37
38 def _execute(self, data):
39 testcase = minimizer.Testcase(data, self)
40 if not self.validate_tokenizer(data, testcase):
41 raise errors.TokenizationFailureError('JS Minimizer')
42
43 brace_stack = []
44 paren_stack = []
45
46 for index, token in enumerate(testcase.tokens):
47 if token == '{':
48 brace_stack.append(index)
49
50 elif token == '}' and brace_stack:
51
52 # Two hypotheses for tokens grouped by curly braces:
53 # 1) Remove from start of line to open brace and the closing brace.
54 # e.g.: if (statement_that_evaluates_to_true) { crash() } -> crash()
55 open_brace_index = brace_stack.pop()
56
57 # Find the first non-empty token prior to the starting brackets.
58 token_before_bracket = step_back_while(
59 open_brace_index - 1, (lambda x: not testcase.tokens[x].strip()))
60
61 # If that token is a close paren, we need to grab everything else too.
62 # Do this to grab the whole paren so we don't create a syntax error by
63 # removing only part of a paren.
64 if testcase.tokens[token_before_bracket] == ')':
65 # Find everything in the paren.
66 token_before_bracket = step_back_while(
67 token_before_bracket, (lambda x: testcase.tokens[x] != '('))
68
69 # Get the token before the paren.
70 token_before_bracket -= 1
71 token_before_bracket = step_back_while(
72 token_before_bracket, (lambda x: not testcase.tokens[x].strip()))
73
74 # Walk back to the start of that line as well to get if/else and funcs.
75 # Do this after paren to manage situations where there are newlines in
76 # the parens.
77 token_before_bracket = step_back_while(
78 token_before_bracket, (lambda x: testcase.tokens[x] != '\n'))
79
80 token_before_bracket += 1
81
82 hypothesis = list(range(token_before_bracket,
83 open_brace_index + 1)) + [index]
84
85 testcase.prepare_test(hypothesis)
86
87 # 2) Remove previous tokens and from the closing brace to the next one.
88 # e.g.: try { crash() } catch(e) {} -> crash().
89 future_index = len(testcase.tokens)
90 open_count = 0
91 for future_index in range(index + 1, len(testcase.tokens)):
92 if testcase.tokens[future_index] == '{':
93 open_count += 1
94 if testcase.tokens[future_index] == '}':
95 open_count -= 1
96 # Make sure to grab entire outer brace if there are inner braces.
97 if not open_count:
98 break
99 if future_index != len(testcase.tokens):
100 lookahead_hypothesis = hypothesis + list(
101 range(index + 1, future_index + 1))
102
103 testcase.prepare_test(lookahead_hypothesis)
104
105 elif token == '(':
106 paren_stack.append(index)
107
108 elif token == ')' and paren_stack:
109 # Three hypotheses for tokens grouped by parentheses:
110 # 1) Remove the parentheses and the previous token.
111 # e.g.: assertTrue(crash()); -> crash()
112 previous_end = paren_stack.pop()
113 if previous_end > 0:
114 hypothesis = [previous_end - 1, previous_end, index]
115 testcase.prepare_test(hypothesis)
116
117 # 2) Remove everything between the parentheses.
118 # e.g. crash(junk, more_junk) -> crash()
119 if index - previous_end > 1:
120 hypothesis = list(range(previous_end + 1, index))
121 testcase.prepare_test(hypothesis)
122
123 # 3) Like 1, but to start of line instead of previous token.
124 # e.g.: leftover_junk = (function() {
125 # });
126
127 # Find the beginning of the line
128 token_before_paren = previous_end
129 token_before_paren = step_back_while(
130 previous_end, (lambda x: testcase.tokens[x] != '\n'))
131 token_before_paren += 1
132
133 hypothesis = list(range(token_before_paren, previous_end + 1)) + [index]
134 testcase.prepare_test(hypothesis)
135
136 # 4) Like 3, but also from the closing brace to the next one.
137 # e.g.: (function(global) { })(this);
138 future_index = len(testcase.tokens)
139 for future_index in range(index + 1, len(testcase.tokens)):
140 if testcase.tokens[future_index] == ')':
141 break
142 if future_index != len(testcase.tokens):
143 lookahead_hypothesis = list(
144 range(token_before_paren, future_index + 1))
145 testcase.prepare_test(lookahead_hypothesis)
146
147 elif token == ',':
148 # Two hypotheses for commas:
149 # 1) Remove comma and left-hand-side.
150 # e.g.: f(whatever, crash()) -> f(crash())
151 if index > 0:
152 hypothesis = [index - 1, index]
153 testcase.prepare_test(hypothesis)
154
155 # 2) Remove comma and right-hand-side.
156 # e.g.: f(crash(), whatever) -> f(crash())
157
158 # Find the next non whitespace token after the comma.
159 hypothesis = [index]
160 for right_token_index in range(index + 1, len(testcase.tokens)):
161 hypothesis.append(right_token_index)
162 if testcase.tokens[right_token_index].strip():
163 testcase.prepare_test(hypothesis)
164 break
165
166 testcase.process()
167 return testcase
168
169 @staticmethod
170 def run(data,
171 thread_count=minimizer.DEFAULT_THREAD_COUNT,
172 file_extension='.js'):
173 """Attempt to minimize a javascript test case."""
174 line_minimizer = delta_minimizer.DeltaMinimizer(
175 utils.test, max_threads=thread_count, file_extension=file_extension)
176
177 js_tokenizer = AntlrTokenizer(JavaScriptLexer)
178
179 js_minimizer = JSMinimizer(
180 utils.test,
181 max_threads=thread_count,
182 tokenizer=js_tokenizer.tokenize,
183 token_combiner=js_tokenizer.combine,
184 file_extension=file_extension)
185
186 result = line_minimizer.minimize(data)
187 result = js_minimizer(result)
188 result = js_minimizer(result)
189 result = line_minimizer.minimize(result)
190
191 return result
192
[end of src/python/bot/minimizer/js_minimizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/bot/minimizer/js_minimizer.py b/src/python/bot/minimizer/js_minimizer.py
--- a/src/python/bot/minimizer/js_minimizer.py
+++ b/src/python/bot/minimizer/js_minimizer.py
@@ -184,8 +184,8 @@
file_extension=file_extension)
result = line_minimizer.minimize(data)
- result = js_minimizer(result)
- result = js_minimizer(result)
+ result = js_minimizer.minimize(result)
+ result = js_minimizer.minimize(result)
result = line_minimizer.minimize(result)
return result
|
{"golden_diff": "diff --git a/src/python/bot/minimizer/js_minimizer.py b/src/python/bot/minimizer/js_minimizer.py\n--- a/src/python/bot/minimizer/js_minimizer.py\n+++ b/src/python/bot/minimizer/js_minimizer.py\n@@ -184,8 +184,8 @@\n file_extension=file_extension)\n \n result = line_minimizer.minimize(data)\n- result = js_minimizer(result)\n- result = js_minimizer(result)\n+ result = js_minimizer.minimize(result)\n+ result = js_minimizer.minimize(result)\n result = line_minimizer.minimize(result)\n \n return result\n", "issue": "js_minimizer is not callable\nThe following run function of JSMinimizer class is not callable as `js_minimizer` is an object and not a function, thus it should not be callable.\r\n\r\n```\r\n js_tokenizer = AntlrTokenizer(JavaScriptLexer)\r\n\r\n js_minimizer = JSMinimizer(\r\n utils.test,\r\n max_threads=thread_count,\r\n tokenizer=js_tokenizer.tokenize,\r\n token_combiner=js_tokenizer.combine,\r\n file_extension=file_extension)\r\n\r\n result = line_minimizer.minimize(data)\r\n result = js_minimizer(result)\r\n result = js_minimizer(result)\r\n result = line_minimizer.minimize(result)\r\n```\r\n\r\nLocation in codebase [here](https://github.com/google/clusterfuzz/blob/master/src/python/bot/minimizer/js_minimizer.py#L187).\r\n\r\nAlso, does js_minimizer should be called two times?\r\nI guess `js_minimizer(result)` should be replaced with `js_minimizer.minimize(data)`, but I am not really sure. Can someone please confirm if it is a bug or not?\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Minimizer used for additional reduction on javascript test cases.\"\"\"\nfrom __future__ import absolute_import\n\nfrom builtins import range\n\nfrom . import delta_minimizer\nfrom . import errors\nfrom . import minimizer\nfrom . import utils\n\nfrom bot.tokenizer.antlr_tokenizer import AntlrTokenizer\nfrom bot.tokenizer.grammars.JavaScriptLexer import JavaScriptLexer\n\n\ndef step_back_while(cur_index, condition):\n \"\"\"Helper function. 
Decreases index from cur while condition is satisfied.\"\"\"\n while cur_index >= 0 and condition(cur_index):\n cur_index -= 1\n return cur_index\n\n\nclass JSMinimizer(minimizer.Minimizer):\n \"\"\"Intended as a second-pass minimizer to remove unneeded tokens from JS.\"\"\"\n\n def _execute(self, data):\n testcase = minimizer.Testcase(data, self)\n if not self.validate_tokenizer(data, testcase):\n raise errors.TokenizationFailureError('JS Minimizer')\n\n brace_stack = []\n paren_stack = []\n\n for index, token in enumerate(testcase.tokens):\n if token == '{':\n brace_stack.append(index)\n\n elif token == '}' and brace_stack:\n\n # Two hypotheses for tokens grouped by curly braces:\n # 1) Remove from start of line to open brace and the closing brace.\n # e.g.: if (statement_that_evaluates_to_true) { crash() } -> crash()\n open_brace_index = brace_stack.pop()\n\n # Find the first non-empty token prior to the starting brackets.\n token_before_bracket = step_back_while(\n open_brace_index - 1, (lambda x: not testcase.tokens[x].strip()))\n\n # If that token is a close paren, we need to grab everything else too.\n # Do this to grab the whole paren so we don't create a syntax error by\n # removing only part of a paren.\n if testcase.tokens[token_before_bracket] == ')':\n # Find everything in the paren.\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: testcase.tokens[x] != '('))\n\n # Get the token before the paren.\n token_before_bracket -= 1\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: not testcase.tokens[x].strip()))\n\n # Walk back to the start of that line as well to get if/else and funcs.\n # Do this after paren to manage situations where there are newlines in\n # the parens.\n token_before_bracket = step_back_while(\n token_before_bracket, (lambda x: testcase.tokens[x] != '\\n'))\n\n token_before_bracket += 1\n\n hypothesis = list(range(token_before_bracket,\n open_brace_index + 1)) + [index]\n\n testcase.prepare_test(hypothesis)\n\n # 2) Remove previous tokens and from the closing brace to the next one.\n # e.g.: try { crash() } catch(e) {} -> crash().\n future_index = len(testcase.tokens)\n open_count = 0\n for future_index in range(index + 1, len(testcase.tokens)):\n if testcase.tokens[future_index] == '{':\n open_count += 1\n if testcase.tokens[future_index] == '}':\n open_count -= 1\n # Make sure to grab entire outer brace if there are inner braces.\n if not open_count:\n break\n if future_index != len(testcase.tokens):\n lookahead_hypothesis = hypothesis + list(\n range(index + 1, future_index + 1))\n\n testcase.prepare_test(lookahead_hypothesis)\n\n elif token == '(':\n paren_stack.append(index)\n\n elif token == ')' and paren_stack:\n # Three hypotheses for tokens grouped by parentheses:\n # 1) Remove the parentheses and the previous token.\n # e.g.: assertTrue(crash()); -> crash()\n previous_end = paren_stack.pop()\n if previous_end > 0:\n hypothesis = [previous_end - 1, previous_end, index]\n testcase.prepare_test(hypothesis)\n\n # 2) Remove everything between the parentheses.\n # e.g. 
crash(junk, more_junk) -> crash()\n if index - previous_end > 1:\n hypothesis = list(range(previous_end + 1, index))\n testcase.prepare_test(hypothesis)\n\n # 3) Like 1, but to start of line instead of previous token.\n # e.g.: leftover_junk = (function() {\n # });\n\n # Find the beginning of the line\n token_before_paren = previous_end\n token_before_paren = step_back_while(\n previous_end, (lambda x: testcase.tokens[x] != '\\n'))\n token_before_paren += 1\n\n hypothesis = list(range(token_before_paren, previous_end + 1)) + [index]\n testcase.prepare_test(hypothesis)\n\n # 4) Like 3, but also from the closing brace to the next one.\n # e.g.: (function(global) { })(this);\n future_index = len(testcase.tokens)\n for future_index in range(index + 1, len(testcase.tokens)):\n if testcase.tokens[future_index] == ')':\n break\n if future_index != len(testcase.tokens):\n lookahead_hypothesis = list(\n range(token_before_paren, future_index + 1))\n testcase.prepare_test(lookahead_hypothesis)\n\n elif token == ',':\n # Two hypotheses for commas:\n # 1) Remove comma and left-hand-side.\n # e.g.: f(whatever, crash()) -> f(crash())\n if index > 0:\n hypothesis = [index - 1, index]\n testcase.prepare_test(hypothesis)\n\n # 2) Remove comma and right-hand-side.\n # e.g.: f(crash(), whatever) -> f(crash())\n\n # Find the next non whitespace token after the comma.\n hypothesis = [index]\n for right_token_index in range(index + 1, len(testcase.tokens)):\n hypothesis.append(right_token_index)\n if testcase.tokens[right_token_index].strip():\n testcase.prepare_test(hypothesis)\n break\n\n testcase.process()\n return testcase\n\n @staticmethod\n def run(data,\n thread_count=minimizer.DEFAULT_THREAD_COUNT,\n file_extension='.js'):\n \"\"\"Attempt to minimize a javascript test case.\"\"\"\n line_minimizer = delta_minimizer.DeltaMinimizer(\n utils.test, max_threads=thread_count, file_extension=file_extension)\n\n js_tokenizer = AntlrTokenizer(JavaScriptLexer)\n\n js_minimizer = JSMinimizer(\n utils.test,\n max_threads=thread_count,\n tokenizer=js_tokenizer.tokenize,\n token_combiner=js_tokenizer.combine,\n file_extension=file_extension)\n\n result = line_minimizer.minimize(data)\n result = js_minimizer(result)\n result = js_minimizer(result)\n result = line_minimizer.minimize(result)\n\n return result\n", "path": "src/python/bot/minimizer/js_minimizer.py"}]}
| 2,878 | 138 |
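Editor's note on the record above (ClusterFuzz `js_minimizer`): the golden diff simply swaps the direct calls `js_minimizer(result)` for `js_minimizer.minimize(result)` and keeps both passes. The snippet below is a generic, self-contained illustration of the underlying Python behaviour, not ClusterFuzz code: an instance without a `__call__` method cannot be invoked like a function, while its methods can.

```python
# Generic illustration (not ClusterFuzz code) of why js_minimizer(result) fails.
class Minimizer:
    def minimize(self, data):
        # Stand-in for the real token-based minimisation.
        return data.strip()


m = Minimizer()
print(m.minimize("  crash()  "))  # -> "crash()"
# m("  crash()  ")  # TypeError: 'Minimizer' object is not callable
```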
gh_patches_debug_31683
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-19089
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bernoulli
</issue>
<code>
[start of ivy/functional/backends/paddle/experimental/random.py]
1 # global
2 from typing import Optional, Union, Sequence
3 import paddle
4
5 from ivy import with_unsupported_device_and_dtypes
6 from ivy.functional.backends.paddle import backend_version
7 from ivy.utils.exceptions import IvyNotImplementedException
8
9 # local
10 import ivy
11 from paddle.device import core
12
13 # dirichlet
14
15
16 @with_unsupported_device_and_dtypes(
17 {
18 "2.5.1 and below": {
19 "cpu": (
20 "int8",
21 "int16",
22 "uint8",
23 "float16",
24 "complex64",
25 "complex128",
26 "bool",
27 )
28 }
29 },
30 backend_version,
31 )
32 def dirichlet(
33 alpha: Union[paddle.Tensor, float, Sequence[float]],
34 /,
35 *,
36 size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
37 out: Optional[paddle.Tensor] = None,
38 seed: Optional[int] = None,
39 dtype: Optional[paddle.dtype] = None,
40 ) -> paddle.Tensor:
41 size = size if size is not None else len(alpha)
42 dtype = dtype if dtype is not None else paddle.float64
43 if seed is not None:
44 paddle.seed(seed)
45 res = paddle.to_tensor(
46 paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),
47 dtype=dtype,
48 )
49 return res
50
51
52 def beta(
53 alpha: Union[float, paddle.Tensor],
54 beta: Union[float, paddle.Tensor],
55 /,
56 *,
57 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
58 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
59 device: core.Place = None,
60 seed: Optional[int] = None,
61 out: Optional[paddle.Tensor] = None,
62 ) -> paddle.Tensor:
63 raise IvyNotImplementedException()
64
65
66 def gamma(
67 alpha: Union[float, paddle.Tensor],
68 beta: Union[float, paddle.Tensor],
69 /,
70 *,
71 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
72 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
73 device: core.Place = None,
74 seed: Optional[int] = None,
75 out: Optional[paddle.Tensor] = None,
76 ) -> paddle.Tensor:
77 raise IvyNotImplementedException()
78
79
80 def poisson(
81 lam: Union[float, paddle.Tensor],
82 *,
83 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
84 device: core.Place,
85 dtype: paddle.dtype,
86 seed: Optional[int] = None,
87 fill_value: Optional[Union[float, int]] = 0,
88 out: Optional[paddle.Tensor] = None,
89 ):
90 raise IvyNotImplementedException()
91
92
93 def bernoulli(
94 probs: Union[float, paddle.Tensor],
95 *,
96 logits: Union[float, paddle.Tensor] = None,
97 shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,
98 device: core.Place,
99 dtype: paddle.dtype,
100 seed: Optional[int] = None,
101 out: Optional[paddle.Tensor] = None,
102 ) -> paddle.Tensor:
103 raise IvyNotImplementedException()
104
[end of ivy/functional/backends/paddle/experimental/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py
--- a/ivy/functional/backends/paddle/experimental/random.py
+++ b/ivy/functional/backends/paddle/experimental/random.py
@@ -1,7 +1,7 @@
# global
from typing import Optional, Union, Sequence
import paddle
-
+from ivy.functional.backends.paddle.device import to_device
from ivy import with_unsupported_device_and_dtypes
from ivy.functional.backends.paddle import backend_version
from ivy.utils.exceptions import IvyNotImplementedException
@@ -9,6 +9,7 @@
# local
import ivy
from paddle.device import core
+from ivy import with_supported_device_and_dtypes
# dirichlet
@@ -90,6 +91,23 @@
raise IvyNotImplementedException()
+# bernoulli
+@with_supported_device_and_dtypes(
+ {
+ "2.5.0 and above": {
+ "cpu": ("float32", "float64"),
+ "gpu": ("bfloat16", "float16", "float32", "float64"),
+ },
+ "2.4.2 and below": {
+ "cpu": (
+ "float32",
+ "float64",
+ ),
+ "gpu": ("float16", "float32", "float64"),
+ },
+ },
+ backend_version,
+)
def bernoulli(
probs: Union[float, paddle.Tensor],
*,
@@ -100,4 +118,14 @@
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
- raise IvyNotImplementedException()
+ if seed is not None:
+ paddle.seed(seed)
+ if probs is not None:
+ probs = probs
+ elif logits is not None:
+ probs = ivy.softmax(logits)
+ probs = paddle.cast(probs, dtype)
+ probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs
+ probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))
+ sample = paddle.bernoulli(probs)
+ return to_device(sample, device)
|
{"golden_diff": "diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py\n--- a/ivy/functional/backends/paddle/experimental/random.py\n+++ b/ivy/functional/backends/paddle/experimental/random.py\n@@ -1,7 +1,7 @@\n # global\n from typing import Optional, Union, Sequence\n import paddle\n-\n+from ivy.functional.backends.paddle.device import to_device\n from ivy import with_unsupported_device_and_dtypes\n from ivy.functional.backends.paddle import backend_version\n from ivy.utils.exceptions import IvyNotImplementedException\n@@ -9,6 +9,7 @@\n # local\n import ivy\n from paddle.device import core\n+from ivy import with_supported_device_and_dtypes\n \n # dirichlet\n \n@@ -90,6 +91,23 @@\n raise IvyNotImplementedException()\n \n \n+# bernoulli\n+@with_supported_device_and_dtypes(\n+ {\n+ \"2.5.0 and above\": {\n+ \"cpu\": (\"float32\", \"float64\"),\n+ \"gpu\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n+ },\n+ \"2.4.2 and below\": {\n+ \"cpu\": (\n+ \"float32\",\n+ \"float64\",\n+ ),\n+ \"gpu\": (\"float16\", \"float32\", \"float64\"),\n+ },\n+ },\n+ backend_version,\n+)\n def bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n@@ -100,4 +118,14 @@\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n ) -> paddle.Tensor:\n- raise IvyNotImplementedException()\n+ if seed is not None:\n+ paddle.seed(seed)\n+ if probs is not None:\n+ probs = probs\n+ elif logits is not None:\n+ probs = ivy.softmax(logits)\n+ probs = paddle.cast(probs, dtype)\n+ probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs\n+ probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))\n+ sample = paddle.bernoulli(probs)\n+ return to_device(sample, device)\n", "issue": "bernoulli\n\n", "before_files": [{"content": "# global\nfrom typing import Optional, Union, Sequence\nimport paddle\n\nfrom ivy import with_unsupported_device_and_dtypes\nfrom ivy.functional.backends.paddle import backend_version\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n# local\nimport ivy\nfrom paddle.device import core\n\n# dirichlet\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef dirichlet(\n alpha: Union[paddle.Tensor, float, Sequence[float]],\n /,\n *,\n size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n out: Optional[paddle.Tensor] = None,\n seed: Optional[int] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n size = size if size is not None else len(alpha)\n dtype = dtype if dtype is not None else paddle.float64\n if seed is not None:\n paddle.seed(seed)\n res = paddle.to_tensor(\n paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),\n dtype=dtype,\n )\n return res\n\n\ndef beta(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef gamma(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n 
seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef poisson(\n lam: Union[float, paddle.Tensor],\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n fill_value: Optional[Union[float, int]] = 0,\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\ndef bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n logits: Union[float, paddle.Tensor] = None,\n shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/experimental/random.py"}]}
| 1,447 | 521 |
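Editor's note on the record above (ivy Paddle backend): the golden diff implements `bernoulli` on top of `paddle.bernoulli`, optionally deriving probabilities from logits via softmax. The snippet below is a minimal standalone sketch of the Paddle primitive itself, not the ivy wrapper; the shape, probability, and seed are arbitrary example values.

```python
# Minimal standalone sketch of Paddle's Bernoulli sampling primitive.
# Shape, probability and seed are arbitrary example values.
import paddle

paddle.seed(0)  # fix the RNG so the draw is reproducible
probs = paddle.full([2, 3], 0.3, dtype="float32")  # per-element P(sample == 1)
samples = paddle.bernoulli(probs)  # 0.0/1.0 tensor with the same shape as probs
print(samples)
```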
gh_patches_debug_27388
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-2787
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RecursionError when installing mkdocs-material
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Windows 10 Pro 1909 18363.719
- **Poetry version**: 1.0.5
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68
## Issue
Hi,
when running: `poetry install -E docs_material` I get the following error:
```
[RecursionError]
maximum recursion depth exceeded
```
There are quite a few similar issues (like #2329), but it seems that this problem is package specific?!?
Thank you for poetry and your effort :)
</issue>
<code>
[start of poetry/utils/extras.py]
1 from typing import Iterator
2 from typing import List
3 from typing import Mapping
4 from typing import Sequence
5
6 from poetry.core.packages import Package
7 from poetry.utils.helpers import canonicalize_name
8
9
10 def get_extra_package_names(
11 packages, # type: Sequence[Package]
12 extras, # type: Mapping[str, List[str]]
13 extra_names, # type: Sequence[str]
14 ): # type: (...) -> Iterator[str]
15 """
16 Returns all package names required by the given extras.
17
18 :param packages: A collection of packages, such as from Repository.packages
19 :param extras: A mapping of `extras` names to lists of package names, as defined
20 in the `extras` section of `poetry.lock`.
21 :param extra_names: A list of strings specifying names of extra groups to resolve.
22 """
23 if not extra_names:
24 return []
25
26 # lookup for packages by name, faster than looping over packages repeatedly
27 packages_by_name = {package.name: package for package in packages}
28
29 # get and flatten names of packages we've opted into as extras
30 extra_package_names = [
31 canonicalize_name(extra_package_name)
32 for extra_name in extra_names
33 for extra_package_name in extras.get(extra_name, ())
34 ]
35
36 def _extra_packages(package_names):
37 """Recursively find dependencies for packages names"""
38 # for each extra pacakge name
39 for package_name in package_names:
40 # Find the actual Package object. A missing key indicates an implicit
41 # dependency (like setuptools), which should be ignored
42 package = packages_by_name.get(canonicalize_name(package_name))
43 if package:
44 yield package.name
45 # Recurse for dependencies
46 for dependency_package_name in _extra_packages(
47 dependency.name for dependency in package.requires
48 ):
49 yield dependency_package_name
50
51 return _extra_packages(extra_package_names)
52
[end of poetry/utils/extras.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py
--- a/poetry/utils/extras.py
+++ b/poetry/utils/extras.py
@@ -33,6 +33,9 @@
for extra_package_name in extras.get(extra_name, ())
]
+ # keep record of packages seen during recursion in order to avoid recursion error
+ seen_package_names = set()
+
def _extra_packages(package_names):
"""Recursively find dependencies for packages names"""
# for each extra pacakge name
@@ -41,11 +44,16 @@
# dependency (like setuptools), which should be ignored
package = packages_by_name.get(canonicalize_name(package_name))
if package:
- yield package.name
+ if package.name not in seen_package_names:
+ seen_package_names.add(package.name)
+ yield package.name
# Recurse for dependencies
for dependency_package_name in _extra_packages(
- dependency.name for dependency in package.requires
+ dependency.name
+ for dependency in package.requires
+ if dependency.name not in seen_package_names
):
+ seen_package_names.add(dependency_package_name)
yield dependency_package_name
return _extra_packages(extra_package_names)
|
{"golden_diff": "diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py\n--- a/poetry/utils/extras.py\n+++ b/poetry/utils/extras.py\n@@ -33,6 +33,9 @@\n for extra_package_name in extras.get(extra_name, ())\n ]\n \n+ # keep record of packages seen during recursion in order to avoid recursion error\n+ seen_package_names = set()\n+\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n@@ -41,11 +44,16 @@\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n- yield package.name\n+ if package.name not in seen_package_names:\n+ seen_package_names.add(package.name)\n+ yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n- dependency.name for dependency in package.requires\n+ dependency.name\n+ for dependency in package.requires\n+ if dependency.name not in seen_package_names\n ):\n+ seen_package_names.add(dependency_package_name)\n yield dependency_package_name\n \n return _extra_packages(extra_package_names)\n", "issue": "RecursionError when installing mkdocs-material\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Windows 10 Pro 1909 18363.719\r\n- **Poetry version**: 1.0.5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68\r\n\r\n## Issue\r\nHi,\r\n\r\nwhen running: `poetry install -E docs_material` I get the following error:\r\n\r\n```\r\n[RecursionError]\r\nmaximum recursion depth exceeded\r\n```\r\n\r\nThere are quite a few similar issues (like #2329), but it seems that this problem is package specific?!?\r\n\r\nThank you for poetry and your effort :)\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Iterator\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Sequence\n\nfrom poetry.core.packages import Package\nfrom poetry.utils.helpers import canonicalize_name\n\n\ndef get_extra_package_names(\n packages, # type: Sequence[Package]\n extras, # type: Mapping[str, List[str]]\n extra_names, # type: Sequence[str]\n): # type: (...) 
-> Iterator[str]\n \"\"\"\n Returns all package names required by the given extras.\n\n :param packages: A collection of packages, such as from Repository.packages\n :param extras: A mapping of `extras` names to lists of package names, as defined\n in the `extras` section of `poetry.lock`.\n :param extra_names: A list of strings specifying names of extra groups to resolve.\n \"\"\"\n if not extra_names:\n return []\n\n # lookup for packages by name, faster than looping over packages repeatedly\n packages_by_name = {package.name: package for package in packages}\n\n # get and flatten names of packages we've opted into as extras\n extra_package_names = [\n canonicalize_name(extra_package_name)\n for extra_name in extra_names\n for extra_package_name in extras.get(extra_name, ())\n ]\n\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n for package_name in package_names:\n # Find the actual Package object. A missing key indicates an implicit\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n dependency.name for dependency in package.requires\n ):\n yield dependency_package_name\n\n return _extra_packages(extra_package_names)\n", "path": "poetry/utils/extras.py"}]}
| 1,324 | 276 |
gh_patches_debug_33896
|
rasdani/github-patches
|
git_diff
|
voxel51__fiftyone-3436
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Group sample in modal missing attributes
In `0.21.6`, a group sample in the modal will be missing attribute data unless the fields are added via `add_dynamic_sample_fields()` or `add_dynamic_frame_fields()`. This is due to a subtle detail in the `relay` cache, which is straightforward to resolve.
```py
import fiftyone as fo
import fiftyone.zoo as foz
dataset = foz.load_zoo_dataset("quickstart-groups")
for sample in dataset:
for detection in sample.ground_truth.detections:
detection["my_attr"] = "value"
sample.save()
# tooltip will not contain `my_attr` on detections
session = fo.launch_app(dataset)
# will force `my_attr` to appear
dataset.add_dynamic_sample_fields()
```
</issue>
<code>
[start of fiftyone/server/routes/media.py]
1 """
2 FiftyOne Server /media route
3
4 | Copyright 2017-2023, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 import typing as t
9
10 import aiofiles
11 from aiofiles.threadpool.binary import AsyncBufferedReader
12 from aiofiles.os import stat as aio_stat
13 from starlette.endpoints import HTTPEndpoint
14 from starlette.requests import Request
15 from starlette.responses import (
16 FileResponse,
17 Response,
18 StreamingResponse,
19 guess_type,
20 )
21
22
23 async def ranged(
24 file: AsyncBufferedReader,
25 start: int = 0,
26 end: int = None,
27 block_size: int = 8192,
28 ) -> t.AsyncGenerator:
29 consumed = 0
30
31 await file.seek(start)
32
33 while True:
34 data_length = (
35 min(block_size, end - start - consumed) if end else block_size
36 )
37
38 if data_length <= 0:
39 break
40
41 data = await file.read(data_length)
42
43 if not data:
44 break
45
46 consumed += data_length
47
48 yield data
49
50 if hasattr(file, "close"):
51 await file.close()
52
53
54 class Media(HTTPEndpoint):
55 async def get(
56 self, request: Request
57 ) -> t.Union[FileResponse, StreamingResponse]:
58 path = request.query_params["filepath"]
59
60 response: t.Union[FileResponse, StreamingResponse]
61 if request.headers.get("range"):
62 response = await self.ranged_file_response(path, request)
63 else:
64 response = FileResponse(
65 path,
66 )
67 response.headers["Accept-Ranges"] = "bytes"
68
69 return response
70
71 async def ranged_file_response(
72 self, path: str, request: Request
73 ) -> StreamingResponse:
74 file = await aiofiles.open(path, "rb")
75 file_size = (await aio_stat(path)).st_size
76 content_range = request.headers.get("range")
77 content_length = file_size
78 status_code = 200
79 headers = {}
80
81 if content_range is not None:
82 content_range = content_range.strip().lower()
83
84 content_ranges = content_range.split("=")[-1]
85
86 range_start, range_end, *_ = map(
87 str.strip, (content_ranges + "-").split("-")
88 )
89
90 start, end = (
91 int(range_start) if range_start else 0,
92 int(range_end) if range_end else file_size - 1,
93 )
94 range_start = max(0, start)
95 range_end = min(file_size - 1, int(end))
96
97 content_length = (end - start) + 1
98
99 file_response = ranged(file, start=start, end=end + 1)
100
101 status_code = 206
102
103 headers["Content-Range"] = f"bytes {start}-{end}/{file_size}"
104
105 response = StreamingResponse(
106 file_response,
107 media_type=guess_type(path)[0],
108 status_code=status_code,
109 )
110
111 response.headers.update(
112 {
113 "Accept-Ranges": "bytes",
114 "Content-Length": str(content_length),
115 **headers,
116 }
117 )
118
119 return response
120
121 async def head(self, request: Request) -> Response:
122 path = request.query_params["filepath"]
123 response = Response()
124 size = (await aio_stat(path)).st_size
125 response.headers.update(
126 {
127 "Accept-Ranges": "bytes",
128 "Content-Type": guess_type(path)[0],
129 "Content-Length": size,
130 }
131 )
132 return response
133
134 async def options(self, request: Request) -> Response:
135 response = Response()
136 response.headers["Accept-Ranges"] = "bytes"
137 response.headers["Allow"] = "OPTIONS, GET, HEAD"
138 return response
139
[end of fiftyone/server/routes/media.py]
[start of fiftyone/server/samples.py]
1 """
2 FiftyOne Server samples pagination
3
4 | Copyright 2017-2023, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 import asyncio
9 import strawberry as gql
10 import typing as t
11
12
13 from fiftyone.core.collections import SampleCollection
14 import fiftyone.core.media as fom
15 import fiftyone.core.odm as foo
16 from fiftyone.core.utils import run_sync_task
17
18 from fiftyone.server.filters import SampleFilter
19 import fiftyone.server.metadata as fosm
20 from fiftyone.server.paginator import Connection, Edge, PageInfo
21 from fiftyone.server.scalars import BSON, JSON, BSONArray
22 from fiftyone.server.utils import from_dict
23 import fiftyone.server.view as fosv
24
25
26 @gql.type
27 class MediaURL:
28 field: str
29 url: t.Optional[str]
30
31
32 @gql.interface
33 class Sample:
34 id: gql.ID
35 sample: JSON
36 urls: t.List[MediaURL]
37 aspect_ratio: float
38
39
40 @gql.type
41 class ImageSample(Sample):
42 pass
43
44
45 @gql.type
46 class PointCloudSample(Sample):
47 pass
48
49
50 @gql.type
51 class VideoSample(Sample):
52 frame_number: int
53 frame_rate: float
54
55
56 SampleItem = gql.union(
57 "SampleItem", types=(ImageSample, PointCloudSample, VideoSample)
58 )
59
60 MEDIA_TYPES = {
61 fom.IMAGE: ImageSample,
62 fom.POINT_CLOUD: PointCloudSample,
63 fom.VIDEO: VideoSample,
64 }
65
66
67 async def paginate_samples(
68 dataset: str,
69 stages: BSONArray,
70 filters: JSON,
71 first: int,
72 after: t.Optional[str] = None,
73 extended_stages: t.Optional[BSON] = None,
74 sample_filter: t.Optional[SampleFilter] = None,
75 pagination_data: t.Optional[bool] = False,
76 ) -> Connection[t.Union[ImageSample, VideoSample], str]:
77 run = lambda reload: fosv.get_view(
78 dataset,
79 stages=stages,
80 filters=filters,
81 pagination_data=pagination_data,
82 extended_stages=extended_stages,
83 sample_filter=sample_filter,
84 reload=reload,
85 )
86 try:
87 view = await run_sync_task(run, False)
88 except:
89 view = await run_sync_task(run, True)
90
91 # check frame field schema explicitly, media type is not reliable for groups
92 has_frames = view.get_frame_field_schema() is not None
93
94 # TODO: Remove this once we have a better way to handle large videos. This
95 # is a temporary fix to reduce the $lookup overhead for sample frames on
96 # full datasets.
97 full_lookup = has_frames and (filters or stages)
98 support = [1, 1] if not full_lookup else None
99 if after is None:
100 after = "-1"
101
102 if int(after) > -1:
103 view = view.skip(int(after) + 1)
104
105 pipeline = view._pipeline(
106 attach_frames=has_frames,
107 detach_frames=False,
108 manual_group_select=sample_filter
109 and sample_filter.group
110 and (sample_filter.group.id and not sample_filter.group.slices),
111 support=support,
112 )
113
114 # Only return the first frame of each video sample for the grid thumbnail
115 if has_frames:
116 pipeline.append({"$addFields": {"frames": {"$slice": ["$frames", 1]}}})
117
118 samples = await foo.aggregate(
119 foo.get_async_db_conn()[view._dataset._sample_collection_name],
120 pipeline,
121 ).to_list(first + 1)
122
123 more = False
124 if len(samples) > first:
125 samples = samples[:first]
126 more = True
127
128 metadata_cache = {}
129 url_cache = {}
130 nodes = await asyncio.gather(
131 *[
132 _create_sample_item(view, sample, metadata_cache, url_cache)
133 for sample in samples
134 ]
135 )
136
137 edges = []
138 for idx, node in enumerate(nodes):
139 edges.append(
140 Edge(
141 node=node,
142 cursor=str(idx + int(after) + 1),
143 )
144 )
145
146 return Connection(
147 page_info=PageInfo(
148 has_previous_page=False,
149 has_next_page=more,
150 start_cursor=edges[0].cursor if edges else None,
151 end_cursor=edges[-1].cursor if len(edges) > 1 else None,
152 ),
153 edges=edges,
154 )
155
156
157 async def _create_sample_item(
158 dataset: SampleCollection,
159 sample: t.Dict,
160 metadata_cache: t.Dict[str, t.Dict],
161 url_cache: t.Dict[str, str],
162 ) -> SampleItem:
163 media_type = fom.get_media_type(sample["filepath"])
164
165 if media_type == fom.IMAGE:
166 cls = ImageSample
167 elif media_type == fom.VIDEO:
168 cls = VideoSample
169 elif media_type == fom.POINT_CLOUD:
170 cls = PointCloudSample
171 else:
172 raise ValueError(f"unknown media type '{media_type}'")
173
174 metadata = await fosm.get_metadata(
175 dataset, sample, media_type, metadata_cache, url_cache
176 )
177
178 if cls == VideoSample:
179 metadata = dict(**metadata, frame_number=sample.get("frame_number", 1))
180
181 return from_dict(cls, {"id": sample["_id"], "sample": sample, **metadata})
182
[end of fiftyone/server/samples.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fiftyone/server/routes/media.py b/fiftyone/server/routes/media.py
--- a/fiftyone/server/routes/media.py
+++ b/fiftyone/server/routes/media.py
@@ -5,8 +5,10 @@
| `voxel51.com <https://voxel51.com/>`_
|
"""
+import os
import typing as t
+import anyio
import aiofiles
from aiofiles.threadpool.binary import AsyncBufferedReader
from aiofiles.os import stat as aio_stat
@@ -58,6 +60,12 @@
path = request.query_params["filepath"]
response: t.Union[FileResponse, StreamingResponse]
+
+ try:
+ await anyio.to_thread.run_sync(os.stat, path)
+ except FileNotFoundError:
+ return Response(content="Not found", status_code=404)
+
if request.headers.get("range"):
response = await self.ranged_file_response(path, request)
else:
diff --git a/fiftyone/server/samples.py b/fiftyone/server/samples.py
--- a/fiftyone/server/samples.py
+++ b/fiftyone/server/samples.py
@@ -129,7 +129,9 @@
url_cache = {}
nodes = await asyncio.gather(
*[
- _create_sample_item(view, sample, metadata_cache, url_cache)
+ _create_sample_item(
+ view, sample, metadata_cache, url_cache, pagination_data
+ )
for sample in samples
]
)
@@ -159,6 +161,7 @@
sample: t.Dict,
metadata_cache: t.Dict[str, t.Dict],
url_cache: t.Dict[str, str],
+ pagination_data: bool,
) -> SampleItem:
media_type = fom.get_media_type(sample["filepath"])
@@ -178,4 +181,9 @@
if cls == VideoSample:
metadata = dict(**metadata, frame_number=sample.get("frame_number", 1))
- return from_dict(cls, {"id": sample["_id"], "sample": sample, **metadata})
+ _id = sample["_id"]
+
+ if not pagination_data:
+ _id = f"{_id}-modal"
+
+ return from_dict(cls, {"id": _id, "sample": sample, **metadata})
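Two independent things happen in this patch. The media route now stats the requested file in a worker thread and returns a 404 for missing files; read in isolation, that guard is a small pattern that could be sketched like this (assumed endpoint wiring, not the full FiftyOne route):

```python
import os
import typing as t

import anyio
from starlette.requests import Request
from starlette.responses import FileResponse, Response


async def serve_file(request: Request) -> t.Union[FileResponse, Response]:
    path = request.query_params["filepath"]

    # Run the blocking stat off the event loop; a missing file becomes a 404
    # response instead of an unhandled FileNotFoundError.
    try:
        await anyio.to_thread.run_sync(os.stat, path)
    except FileNotFoundError:
        return Response(content="Not found", status_code=404)

    return FileResponse(path)
```

The second hunk addresses the reported bug itself: when `pagination_data` is false (the modal query), the sample id is suffixed with `-modal`, so the Relay cache keys grid and modal samples separately and the modal no longer reuses a stale grid entry that lacks the dynamic attributes.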
|
{"golden_diff": "diff --git a/fiftyone/server/routes/media.py b/fiftyone/server/routes/media.py\n--- a/fiftyone/server/routes/media.py\n+++ b/fiftyone/server/routes/media.py\n@@ -5,8 +5,10 @@\n | `voxel51.com <https://voxel51.com/>`_\n |\n \"\"\"\n+import os\n import typing as t\n \n+import anyio\n import aiofiles\n from aiofiles.threadpool.binary import AsyncBufferedReader\n from aiofiles.os import stat as aio_stat\n@@ -58,6 +60,12 @@\n path = request.query_params[\"filepath\"]\n \n response: t.Union[FileResponse, StreamingResponse]\n+\n+ try:\n+ await anyio.to_thread.run_sync(os.stat, path)\n+ except FileNotFoundError:\n+ return Response(content=\"Not found\", status_code=404)\n+\n if request.headers.get(\"range\"):\n response = await self.ranged_file_response(path, request)\n else:\ndiff --git a/fiftyone/server/samples.py b/fiftyone/server/samples.py\n--- a/fiftyone/server/samples.py\n+++ b/fiftyone/server/samples.py\n@@ -129,7 +129,9 @@\n url_cache = {}\n nodes = await asyncio.gather(\n *[\n- _create_sample_item(view, sample, metadata_cache, url_cache)\n+ _create_sample_item(\n+ view, sample, metadata_cache, url_cache, pagination_data\n+ )\n for sample in samples\n ]\n )\n@@ -159,6 +161,7 @@\n sample: t.Dict,\n metadata_cache: t.Dict[str, t.Dict],\n url_cache: t.Dict[str, str],\n+ pagination_data: bool,\n ) -> SampleItem:\n media_type = fom.get_media_type(sample[\"filepath\"])\n \n@@ -178,4 +181,9 @@\n if cls == VideoSample:\n metadata = dict(**metadata, frame_number=sample.get(\"frame_number\", 1))\n \n- return from_dict(cls, {\"id\": sample[\"_id\"], \"sample\": sample, **metadata})\n+ _id = sample[\"_id\"]\n+\n+ if not pagination_data:\n+ _id = f\"{_id}-modal\"\n+\n+ return from_dict(cls, {\"id\": _id, \"sample\": sample, **metadata})\n", "issue": "[BUG] Group sample in modal missing attributes\nIn `0.21.6`, a group sample in the modal will be missing attribute data unless the fields are added via `add_dynamic_sample_fields()` or `add_dynamic_frame_fields()`. 
This is due a subtle detail in the `relay` cache, which is straightforward to resolve.\r\n\r\n```py\r\nimport fiftyone as fo\r\n\r\ndataset = foz.load_zoo_dataset(\"quickstart-groups\")\r\n\r\nfor sample in dataset:\r\n for detection in sample.ground_truth.detections:\r\n detection[\"my_attr\"] = \"value\"\r\n sample.save()\r\n\r\n# tooltip will not contain `my_attr` on detections\r\nsession = fo.launch_app(dataset)\r\n\r\n\r\n# will force `my_attr` to appear\r\ndataset.add_dynamic_sample_fields()\r\n```\n", "before_files": [{"content": "\"\"\"\nFiftyOne Server /media route\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport typing as t\n\nimport aiofiles\nfrom aiofiles.threadpool.binary import AsyncBufferedReader\nfrom aiofiles.os import stat as aio_stat\nfrom starlette.endpoints import HTTPEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import (\n FileResponse,\n Response,\n StreamingResponse,\n guess_type,\n)\n\n\nasync def ranged(\n file: AsyncBufferedReader,\n start: int = 0,\n end: int = None,\n block_size: int = 8192,\n) -> t.AsyncGenerator:\n consumed = 0\n\n await file.seek(start)\n\n while True:\n data_length = (\n min(block_size, end - start - consumed) if end else block_size\n )\n\n if data_length <= 0:\n break\n\n data = await file.read(data_length)\n\n if not data:\n break\n\n consumed += data_length\n\n yield data\n\n if hasattr(file, \"close\"):\n await file.close()\n\n\nclass Media(HTTPEndpoint):\n async def get(\n self, request: Request\n ) -> t.Union[FileResponse, StreamingResponse]:\n path = request.query_params[\"filepath\"]\n\n response: t.Union[FileResponse, StreamingResponse]\n if request.headers.get(\"range\"):\n response = await self.ranged_file_response(path, request)\n else:\n response = FileResponse(\n path,\n )\n response.headers[\"Accept-Ranges\"] = \"bytes\"\n\n return response\n\n async def ranged_file_response(\n self, path: str, request: Request\n ) -> StreamingResponse:\n file = await aiofiles.open(path, \"rb\")\n file_size = (await aio_stat(path)).st_size\n content_range = request.headers.get(\"range\")\n content_length = file_size\n status_code = 200\n headers = {}\n\n if content_range is not None:\n content_range = content_range.strip().lower()\n\n content_ranges = content_range.split(\"=\")[-1]\n\n range_start, range_end, *_ = map(\n str.strip, (content_ranges + \"-\").split(\"-\")\n )\n\n start, end = (\n int(range_start) if range_start else 0,\n int(range_end) if range_end else file_size - 1,\n )\n range_start = max(0, start)\n range_end = min(file_size - 1, int(end))\n\n content_length = (end - start) + 1\n\n file_response = ranged(file, start=start, end=end + 1)\n\n status_code = 206\n\n headers[\"Content-Range\"] = f\"bytes {start}-{end}/{file_size}\"\n\n response = StreamingResponse(\n file_response,\n media_type=guess_type(path)[0],\n status_code=status_code,\n )\n\n response.headers.update(\n {\n \"Accept-Ranges\": \"bytes\",\n \"Content-Length\": str(content_length),\n **headers,\n }\n )\n\n return response\n\n async def head(self, request: Request) -> Response:\n path = request.query_params[\"filepath\"]\n response = Response()\n size = (await aio_stat(path)).st_size\n response.headers.update(\n {\n \"Accept-Ranges\": \"bytes\",\n \"Content-Type\": guess_type(path)[0],\n \"Content-Length\": size,\n }\n )\n return response\n\n async def options(self, request: Request) -> Response:\n response = Response()\n response.headers[\"Accept-Ranges\"] = \"bytes\"\n 
response.headers[\"Allow\"] = \"OPTIONS, GET, HEAD\"\n return response\n", "path": "fiftyone/server/routes/media.py"}, {"content": "\"\"\"\nFiftyOne Server samples pagination\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport asyncio\nimport strawberry as gql\nimport typing as t\n\n\nfrom fiftyone.core.collections import SampleCollection\nimport fiftyone.core.media as fom\nimport fiftyone.core.odm as foo\nfrom fiftyone.core.utils import run_sync_task\n\nfrom fiftyone.server.filters import SampleFilter\nimport fiftyone.server.metadata as fosm\nfrom fiftyone.server.paginator import Connection, Edge, PageInfo\nfrom fiftyone.server.scalars import BSON, JSON, BSONArray\nfrom fiftyone.server.utils import from_dict\nimport fiftyone.server.view as fosv\n\n\[email protected]\nclass MediaURL:\n field: str\n url: t.Optional[str]\n\n\[email protected]\nclass Sample:\n id: gql.ID\n sample: JSON\n urls: t.List[MediaURL]\n aspect_ratio: float\n\n\[email protected]\nclass ImageSample(Sample):\n pass\n\n\[email protected]\nclass PointCloudSample(Sample):\n pass\n\n\[email protected]\nclass VideoSample(Sample):\n frame_number: int\n frame_rate: float\n\n\nSampleItem = gql.union(\n \"SampleItem\", types=(ImageSample, PointCloudSample, VideoSample)\n)\n\nMEDIA_TYPES = {\n fom.IMAGE: ImageSample,\n fom.POINT_CLOUD: PointCloudSample,\n fom.VIDEO: VideoSample,\n}\n\n\nasync def paginate_samples(\n dataset: str,\n stages: BSONArray,\n filters: JSON,\n first: int,\n after: t.Optional[str] = None,\n extended_stages: t.Optional[BSON] = None,\n sample_filter: t.Optional[SampleFilter] = None,\n pagination_data: t.Optional[bool] = False,\n) -> Connection[t.Union[ImageSample, VideoSample], str]:\n run = lambda reload: fosv.get_view(\n dataset,\n stages=stages,\n filters=filters,\n pagination_data=pagination_data,\n extended_stages=extended_stages,\n sample_filter=sample_filter,\n reload=reload,\n )\n try:\n view = await run_sync_task(run, False)\n except:\n view = await run_sync_task(run, True)\n\n # check frame field schema explicitly, media type is not reliable for groups\n has_frames = view.get_frame_field_schema() is not None\n\n # TODO: Remove this once we have a better way to handle large videos. 
This\n # is a temporary fix to reduce the $lookup overhead for sample frames on\n # full datasets.\n full_lookup = has_frames and (filters or stages)\n support = [1, 1] if not full_lookup else None\n if after is None:\n after = \"-1\"\n\n if int(after) > -1:\n view = view.skip(int(after) + 1)\n\n pipeline = view._pipeline(\n attach_frames=has_frames,\n detach_frames=False,\n manual_group_select=sample_filter\n and sample_filter.group\n and (sample_filter.group.id and not sample_filter.group.slices),\n support=support,\n )\n\n # Only return the first frame of each video sample for the grid thumbnail\n if has_frames:\n pipeline.append({\"$addFields\": {\"frames\": {\"$slice\": [\"$frames\", 1]}}})\n\n samples = await foo.aggregate(\n foo.get_async_db_conn()[view._dataset._sample_collection_name],\n pipeline,\n ).to_list(first + 1)\n\n more = False\n if len(samples) > first:\n samples = samples[:first]\n more = True\n\n metadata_cache = {}\n url_cache = {}\n nodes = await asyncio.gather(\n *[\n _create_sample_item(view, sample, metadata_cache, url_cache)\n for sample in samples\n ]\n )\n\n edges = []\n for idx, node in enumerate(nodes):\n edges.append(\n Edge(\n node=node,\n cursor=str(idx + int(after) + 1),\n )\n )\n\n return Connection(\n page_info=PageInfo(\n has_previous_page=False,\n has_next_page=more,\n start_cursor=edges[0].cursor if edges else None,\n end_cursor=edges[-1].cursor if len(edges) > 1 else None,\n ),\n edges=edges,\n )\n\n\nasync def _create_sample_item(\n dataset: SampleCollection,\n sample: t.Dict,\n metadata_cache: t.Dict[str, t.Dict],\n url_cache: t.Dict[str, str],\n) -> SampleItem:\n media_type = fom.get_media_type(sample[\"filepath\"])\n\n if media_type == fom.IMAGE:\n cls = ImageSample\n elif media_type == fom.VIDEO:\n cls = VideoSample\n elif media_type == fom.POINT_CLOUD:\n cls = PointCloudSample\n else:\n raise ValueError(f\"unknown media type '{media_type}'\")\n\n metadata = await fosm.get_metadata(\n dataset, sample, media_type, metadata_cache, url_cache\n )\n\n if cls == VideoSample:\n metadata = dict(**metadata, frame_number=sample.get(\"frame_number\", 1))\n\n return from_dict(cls, {\"id\": sample[\"_id\"], \"sample\": sample, **metadata})\n", "path": "fiftyone/server/samples.py"}]}
| 3,463 | 513 |
gh_patches_debug_4951
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-4919
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mitmproxy depends on `pkg_resources` at runtime but does not specify `install_requires=setuptools`
#### Problem Description
https://github.com/mitmproxy/mitmproxy/blob/0ca458fd6475ee48728147f3b529467a75e912a4/mitmproxy/contrib/kaitaistruct/exif.py#L7
The `pkg_resources` module is provided by `setuptools`.
Installation into a minimal environment (for example, Bazel) will break unless `setuptools` is also specified.
mitmproxy should therefore declare a dependency on `setuptools`.
#### Steps to reproduce the behavior:
1. simulate a minimal environment:
```
virtualenv venv
venv/bin/pip install mitmproxy
venv/bin/pip uninstall setuptools
venv/bin/pip install mitmproxy # make sure we actually have its deps even after uninstalling
```
2. run `mitmproxy --help`
```console
$ venv/bin/mitmproxy --help
Traceback (most recent call last):
File "venv/bin/mitmproxy", line 5, in <module>
from mitmproxy.tools.main import mitmproxy
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py", line 8, in <module>
from mitmproxy import exceptions, master
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py", line 10, in <module>
from mitmproxy import eventsequence
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py", line 8, in <module>
from mitmproxy.proxy import layers
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py", line 1, in <module>
from . import modes
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py", line 9, in <module>
from mitmproxy.proxy.layers import tls
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py", line 8, in <module>
from mitmproxy.net import tls as net_tls
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py", line 18, in <module>
from mitmproxy.contrib.kaitaistruct import tls_client_hello
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py", line 7, in <module>
from pkg_resources import parse_version
ModuleNotFoundError: No module named 'pkg_resources'
```
#### System Information
Paste the output of "mitmproxy --version" here.
```console
$ venv/bin/mitmproxy --version
Traceback (most recent call last):
File "venv/bin/mitmproxy", line 5, in <module>
from mitmproxy.tools.main import mitmproxy
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py", line 8, in <module>
from mitmproxy import exceptions, master
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py", line 10, in <module>
from mitmproxy import eventsequence
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py", line 8, in <module>
from mitmproxy.proxy import layers
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py", line 1, in <module>
from . import modes
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py", line 9, in <module>
from mitmproxy.proxy.layers import tls
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py", line 8, in <module>
from mitmproxy.net import tls as net_tls
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py", line 18, in <module>
from mitmproxy.contrib.kaitaistruct import tls_client_hello
File "/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py", line 7, in <module>
from pkg_resources import parse_version
ModuleNotFoundError: No module named 'pkg_resources'
```
</issue>
<code>
[start of setup.py]
1 import os
2 import re
3 from codecs import open
4
5 from setuptools import find_packages, setup
6
7 # Based on https://github.com/pypa/sampleproject/blob/main/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
13 long_description = f.read()
14 long_description_content_type = "text/markdown"
15
16 with open(os.path.join(here, "mitmproxy", "version.py")) as f:
17 match = re.search(r'VERSION = "(.+?)"', f.read())
18 assert match
19 VERSION = match.group(1)
20
21 setup(
22 name="mitmproxy",
23 version=VERSION,
24 description="An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.",
25 long_description=long_description,
26 long_description_content_type=long_description_content_type,
27 url="http://mitmproxy.org",
28 author="Aldo Cortesi",
29 author_email="[email protected]",
30 license="MIT",
31 classifiers=[
32 "License :: OSI Approved :: MIT License",
33 "Development Status :: 5 - Production/Stable",
34 "Environment :: Console :: Curses",
35 "Operating System :: MacOS",
36 "Operating System :: POSIX",
37 "Operating System :: Microsoft :: Windows",
38 "Programming Language :: Python :: 3 :: Only",
39 "Programming Language :: Python :: 3.8",
40 "Programming Language :: Python :: 3.9",
41 "Programming Language :: Python :: 3.10",
42 "Programming Language :: Python :: Implementation :: CPython",
43 "Topic :: Security",
44 "Topic :: Internet :: WWW/HTTP",
45 "Topic :: Internet :: Proxy Servers",
46 "Topic :: System :: Networking :: Monitoring",
47 "Topic :: Software Development :: Testing",
48 "Typing :: Typed",
49 ],
50 project_urls={
51 'Documentation': 'https://docs.mitmproxy.org/stable/',
52 'Source': 'https://github.com/mitmproxy/mitmproxy/',
53 'Tracker': 'https://github.com/mitmproxy/mitmproxy/issues',
54 },
55 packages=find_packages(include=[
56 "mitmproxy", "mitmproxy.*",
57 ]),
58 include_package_data=True,
59 entry_points={
60 'console_scripts': [
61 "mitmproxy = mitmproxy.tools.main:mitmproxy",
62 "mitmdump = mitmproxy.tools.main:mitmdump",
63 "mitmweb = mitmproxy.tools.main:mitmweb",
64 ]
65 },
66 python_requires='>=3.8',
67 # https://packaging.python.org/en/latest/requirements/#install-requires
68 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
69 install_requires=[
70 "asgiref>=3.2.10,<3.5",
71 "blinker>=1.4, <1.5",
72 "Brotli>=1.0,<1.1",
73 "certifi>=2019.9.11", # no semver here - this should always be on the last release!
74 "click>=7.0,<8.1",
75 "cryptography>=3.3,<3.5",
76 "flask>=1.1.1,<2.1",
77 "h11>=0.11,<0.13",
78 "h2>=4.1,<5",
79 "hyperframe>=6.0,<7",
80 "kaitaistruct>=0.7,<0.10",
81 "ldap3>=2.8,<2.10",
82 "msgpack>=1.0.0, <1.1.0",
83 "passlib>=1.6.5, <1.8",
84 "protobuf>=3.14,<3.19",
85 "pyOpenSSL>=21.0,<21.1",
86 "pyparsing>=2.4.2,<2.5",
87 "pyperclip>=1.6.0,<1.9",
88 "ruamel.yaml>=0.16,<0.17.17",
89 "sortedcontainers>=2.3,<2.5",
90 "tornado>=6.1,<7",
91 "urwid>=2.1.1,<2.2",
92 "wsproto>=1.0,<1.1",
93 "publicsuffix2>=2.20190812,<3",
94 "zstandard>=0.11,<0.16",
95 ],
96 extras_require={
97 ':sys_platform == "win32"': [
98 "pydivert>=2.0.3,<2.2",
99 ],
100 'dev': [
101 "hypothesis>=5.8,<7",
102 "parver>=0.1,<2.0",
103 "pdoc>=4.0.0",
104 "pyinstaller==4.5.1",
105 "pytest-asyncio>=0.10.0,<0.16,!=0.14",
106 "pytest-cov>=2.7.1,<3",
107 "pytest-timeout>=1.3.3,<2",
108 "pytest-xdist>=2.1.0,<3",
109 "pytest>=6.1.0,<7",
110 "requests>=2.9.1,<3",
111 "tox>=3.5,<4",
112 "wheel>=0.36.2,<0.38",
113 "coverage==5.5", # workaround issue with import errors introduced in 5.6b1/6.0
114 ],
115 }
116 )
117
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -86,6 +86,8 @@
"pyparsing>=2.4.2,<2.5",
"pyperclip>=1.6.0,<1.9",
"ruamel.yaml>=0.16,<0.17.17",
+ # Kaitai parsers depend on setuptools, remove once https://github.com/kaitai-io/kaitai_struct_python_runtime/issues/62 is fixed
+ "setuptools",
"sortedcontainers>=2.3,<2.5",
"tornado>=6.1,<7",
"urwid>=2.1.1,<2.2",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -86,6 +86,8 @@\n \"pyparsing>=2.4.2,<2.5\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.17.17\",\n+ # Kaitai parsers depend on setuptools, remove once https://github.com/kaitai-io/kaitai_struct_python_runtime/issues/62 is fixed\n+ \"setuptools\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n", "issue": "mitmproxy depends on `pkg_resources` at runtime but does not specify `install_requires=setuptools`\n#### Problem Description\r\nA clear and concise description of what the bug is.\r\n\r\nhttps://github.com/mitmproxy/mitmproxy/blob/0ca458fd6475ee48728147f3b529467a75e912a4/mitmproxy/contrib/kaitaistruct/exif.py#L7\r\n\r\nthe `pkg_resources` module is provided by `setuptools`\r\n\r\ninstallation into a minimal environment (for example, bazel) will break without also needing to specify `setuptools`\r\n\r\nmitmproxy should depend on `setuptools`\r\n\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\n1. simulate a minimal environment:\r\n\r\n```\r\nvirtualenv venv\r\nvenv/bin/pip install mitmproxy\r\nvenv/bin/pip uninstall setuptools\r\nvenv/bin/pip install mitmproxy # make sure we actually have its deps even after uninstalling\r\n```\r\n\r\n2. run `mitmproxy --help`\r\n\r\n```console\r\n$ venv/bin/mitmproxy --help\r\nTraceback (most recent call last):\r\n File \"venv/bin/mitmproxy\", line 5, in <module>\r\n from mitmproxy.tools.main import mitmproxy\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py\", line 8, in <module>\r\n from mitmproxy import exceptions, master\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py\", line 10, in <module>\r\n from mitmproxy import eventsequence\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py\", line 8, in <module>\r\n from mitmproxy.proxy import layers\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py\", line 1, in <module>\r\n from . import modes\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py\", line 9, in <module>\r\n from mitmproxy.proxy.layers import tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py\", line 8, in <module>\r\n from mitmproxy.net import tls as net_tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py\", line 18, in <module>\r\n from mitmproxy.contrib.kaitaistruct import tls_client_hello\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py\", line 7, in <module>\r\n from pkg_resources import parse_version\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n```\r\n\r\n#### System Information\r\nPaste the output of \"mitmproxy --version\" here.\r\n\r\n```console\r\n$ venv/bin/mitmproxy --version\r\nTraceback (most recent call last):\r\n File \"venv/bin/mitmproxy\", line 5, in <module>\r\n from mitmproxy.tools.main import mitmproxy\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/tools/main.py\", line 8, in <module>\r\n from mitmproxy import exceptions, master\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/master.py\", line 10, in <module>\r\n from mitmproxy import eventsequence\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/eventsequence.py\", line 8, in <module>\r\n from mitmproxy.proxy import layers\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/__init__.py\", line 1, in <module>\r\n from . 
import modes\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/modes.py\", line 9, in <module>\r\n from mitmproxy.proxy.layers import tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/proxy/layers/tls.py\", line 8, in <module>\r\n from mitmproxy.net import tls as net_tls\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/net/tls.py\", line 18, in <module>\r\n from mitmproxy.contrib.kaitaistruct import tls_client_hello\r\n File \"/tmp/y/venv/lib/python3.8/site-packages/mitmproxy/contrib/kaitaistruct/tls_client_hello.py\", line 7, in <module>\r\n from pkg_resources import parse_version\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n```\n", "before_files": [{"content": "import os\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\n# Based on https://github.com/pypa/sampleproject/blob/main/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nlong_description_content_type = \"text/markdown\"\n\nwith open(os.path.join(here, \"mitmproxy\", \"version.py\")) as f:\n match = re.search(r'VERSION = \"(.+?)\"', f.read())\n assert match\n VERSION = match.group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.\",\n long_description=long_description,\n long_description_content_type=long_description_content_type,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n project_urls={\n 'Documentation': 'https://docs.mitmproxy.org/stable/',\n 'Source': 'https://github.com/mitmproxy/mitmproxy/',\n 'Tracker': 'https://github.com/mitmproxy/mitmproxy/issues',\n },\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n ]\n },\n python_requires='>=3.8',\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"asgiref>=3.2.10,<3.5\",\n \"blinker>=1.4, <1.5\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last release!\n \"click>=7.0,<8.1\",\n \"cryptography>=3.3,<3.5\",\n \"flask>=1.1.1,<2.1\",\n \"h11>=0.11,<0.13\",\n \"h2>=4.1,<5\",\n \"hyperframe>=6.0,<7\",\n \"kaitaistruct>=0.7,<0.10\",\n \"ldap3>=2.8,<2.10\",\n 
\"msgpack>=1.0.0, <1.1.0\",\n \"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<3.19\",\n \"pyOpenSSL>=21.0,<21.1\",\n \"pyparsing>=2.4.2,<2.5\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.17.17\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n \"wsproto>=1.0,<1.1\",\n \"publicsuffix2>=2.20190812,<3\",\n \"zstandard>=0.11,<0.16\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n \"pyinstaller==4.5.1\",\n \"pytest-asyncio>=0.10.0,<0.16,!=0.14\",\n \"pytest-cov>=2.7.1,<3\",\n \"pytest-timeout>=1.3.3,<2\",\n \"pytest-xdist>=2.1.0,<3\",\n \"pytest>=6.1.0,<7\",\n \"requests>=2.9.1,<3\",\n \"tox>=3.5,<4\",\n \"wheel>=0.36.2,<0.38\",\n \"coverage==5.5\", # workaround issue with import errors introduced in 5.6b1/6.0\n ],\n }\n)\n", "path": "setup.py"}]}
| 3,066 | 162 |
gh_patches_debug_230
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-2545
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Releasing 1.0
With #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release.
This should consist of:
- [x] assembling changelog #2440
- [x] making sure new features are well documented
- [x] publishing beta release
- [x] test beta (perhaps by adding it to the z2jh chart)
- [ ] release 1.0 final
</issue>
<code>
[start of jupyterhub/_version.py]
1 """JupyterHub version info"""
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4
5 version_info = (
6 1,
7 0,
8 0,
9 "b2", # release (b1, rc1, or "" for final or dev)
10 # "dev", # dev or nothing
11 )
12
13 # pep 440 version: no dot before beta/rc, but before .dev
14 # 0.1.0rc1
15 # 0.1.0a1
16 # 0.1.0b1.dev
17 # 0.1.0.dev
18
19 __version__ = ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])
20
21
22 def _check_version(hub_version, singleuser_version, log):
23 """Compare Hub and single-user server versions"""
24 if not hub_version:
25 log.warning(
26 "Hub has no version header, which means it is likely < 0.8. Expected %s",
27 __version__,
28 )
29 return
30
31 if not singleuser_version:
32 log.warning(
33 "Single-user server has no version header, which means it is likely < 0.8. Expected %s",
34 __version__,
35 )
36 return
37
38 # compare minor X.Y versions
39 if hub_version != singleuser_version:
40 from distutils.version import LooseVersion as V
41
42 hub_major_minor = V(hub_version).version[:2]
43 singleuser_major_minor = V(singleuser_version).version[:2]
44 extra = ""
45 if singleuser_major_minor == hub_major_minor:
46 # patch-level mismatch or lower, log difference at debug-level
47 # because this should be fine
48 log_method = log.debug
49 else:
50 # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.
51 log_method = log.warning
52 extra = " This could cause failure to authenticate and result in redirect loops!"
53 log_method(
54 "jupyterhub version %s != jupyterhub-singleuser version %s." + extra,
55 hub_version,
56 singleuser_version,
57 )
58 else:
59 log.debug(
60 "jupyterhub and jupyterhub-singleuser both on version %s" % hub_version
61 )
62
[end of jupyterhub/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py
--- a/jupyterhub/_version.py
+++ b/jupyterhub/_version.py
@@ -6,7 +6,7 @@
1,
0,
0,
- "b2", # release (b1, rc1, or "" for final or dev)
+ # "b2", # release (b1, rc1, or "" for final or dev)
# "dev", # dev or nothing
)
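The only change is dropping the `"b2"` pre-release tag from `version_info`. Given the `__version__` expression in `_version.py`, the effect on the rendered version string is easy to verify:

```python
def render(version_info):
    # Same expression used in jupyterhub/_version.py
    return ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])


assert render((1, 0, 0, "b2")) == "1.0.0b2"  # before the patch
assert render((1, 0, 0)) == "1.0.0"          # after the patch
```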
|
{"golden_diff": "diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py\n--- a/jupyterhub/_version.py\n+++ b/jupyterhub/_version.py\n@@ -6,7 +6,7 @@\n 1,\n 0,\n 0,\n- \"b2\", # release (b1, rc1, or \"\" for final or dev)\n+ # \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n )\n", "issue": "Releasing 1.0\nWith #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release.\r\n\r\nThis should consist of:\r\n\r\n- [x] assembling changelog #2440\r\n- [x] making sure new features are well documented\r\n- [x] publishing beta release\r\n- [x] test beta (perhaps by adding it to the z2jh chart)\r\n- [ ] release 1.0 final\n", "before_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 1,\n 0,\n 0,\n \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n)\n\n# pep 440 version: no dot before beta/rc, but before .dev\n# 0.1.0rc1\n# 0.1.0a1\n# 0.1.0b1.dev\n# 0.1.0.dev\n\n__version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\n \"Hub has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n if not singleuser_version:\n log.warning(\n \"Single-user server has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n )\n", "path": "jupyterhub/_version.py"}]}
| 1,257 | 117 |
gh_patches_debug_5303
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-498
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Closing a connection tries to close already closed cursors
When closing a connection, the library calls `.close()` on every cursor created for that connection, including ones that are already closed. While the code still works, this produces a lot of error-level log entries, polluting the logs.
#### Environment details
- OS type and version: Mac OS 10.15.7
- Python version: Python 3.8.2
- pip version: pip 20.3.3
- `google-cloud-bigquery` version: 2.7.0
#### Steps to reproduce
1. Close a cursor
2. Close the connection
3. Error log shows `Exception closing connection <google.cloud.bigquery.dbapi.connection.Connection object at 0x...>`
#### Code example
```python
from contextlib import closing
# using pybigquery
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute(sql)
```
#### Stack trace
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/pool/base.py", line 270, in _close_connection
self._dialect.do_close(connection)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 549, in do_close
dbapi_connection.close()
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py", line 258, in with_closed_check
return method(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/connection.py", line 79, in close
cursor_.close()
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py", line 257, in with_closed_check
raise exc_class(exc_msg)
google.cloud.bigquery.dbapi.exceptions.ProgrammingError: Operating on a closed cursor.
```
#### Suggested fix
```python
# google/cloud/bigquery/dbapi/connection.py
class Connection(object):
...
def close(self):
...
for cursor_ in self._cursors_created:
if not cursor_._closed:
cursor_.close()
```
</issue>
<code>
[start of google/cloud/bigquery/dbapi/connection.py]
1 # Copyright 2017 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Connection for the Google BigQuery DB-API."""
16
17 import weakref
18
19 from google.cloud import bigquery
20 from google.cloud.bigquery.dbapi import cursor
21 from google.cloud.bigquery.dbapi import _helpers
22
23
24 @_helpers.raise_on_closed("Operating on a closed connection.")
25 class Connection(object):
26 """DB-API Connection to Google BigQuery.
27
28 Args:
29 client (Optional[google.cloud.bigquery.Client]):
30 A REST API client used to connect to BigQuery. If not passed, a
31 client is created using default options inferred from the environment.
32 bqstorage_client(\
33 Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \
34 ):
35 A client that uses the faster BigQuery Storage API to fetch rows from
36 BigQuery. If not passed, it is created using the same credentials
37 as ``client`` (provided that BigQuery Storage dependencies are installed).
38
39 If both clients are available, ``bqstorage_client`` is used for
40 fetching query results.
41 """
42
43 def __init__(self, client=None, bqstorage_client=None):
44 if client is None:
45 client = bigquery.Client()
46 self._owns_client = True
47 else:
48 self._owns_client = False
49
50 if bqstorage_client is None:
51 # A warning is already raised by the factory if instantiation fails.
52 bqstorage_client = client._create_bqstorage_client()
53 self._owns_bqstorage_client = bqstorage_client is not None
54 else:
55 self._owns_bqstorage_client = False
56
57 self._client = client
58 self._bqstorage_client = bqstorage_client
59
60 self._closed = False
61 self._cursors_created = weakref.WeakSet()
62
63 def close(self):
64 """Close the connection and any cursors created from it.
65
66 Any BigQuery clients explicitly passed to the constructor are *not*
67 closed, only those created by the connection instance itself.
68 """
69 self._closed = True
70
71 if self._owns_client:
72 self._client.close()
73
74 if self._owns_bqstorage_client:
75 # There is no close() on the BQ Storage client itself.
76 self._bqstorage_client._transport.grpc_channel.close()
77
78 for cursor_ in self._cursors_created:
79 cursor_.close()
80
81 def commit(self):
82 """No-op, but for consistency raise an error if connection is closed."""
83
84 def cursor(self):
85 """Return a new cursor object.
86
87 Returns:
88 google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.
89 """
90 new_cursor = cursor.Cursor(self)
91 self._cursors_created.add(new_cursor)
92 return new_cursor
93
94
95 def connect(client=None, bqstorage_client=None):
96 """Construct a DB-API connection to Google BigQuery.
97
98 Args:
99 client (Optional[google.cloud.bigquery.Client]):
100 A REST API client used to connect to BigQuery. If not passed, a
101 client is created using default options inferred from the environment.
102 bqstorage_client(\
103 Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \
104 ):
105 A client that uses the faster BigQuery Storage API to fetch rows from
106 BigQuery. If not passed, it is created using the same credentials
107 as ``client`` (provided that BigQuery Storage dependencies are installed).
108
109 If both clients are available, ``bqstorage_client`` is used for
110 fetching query results.
111
112 Returns:
113 google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.
114 """
115 return Connection(client, bqstorage_client)
116
[end of google/cloud/bigquery/dbapi/connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/google/cloud/bigquery/dbapi/connection.py b/google/cloud/bigquery/dbapi/connection.py
--- a/google/cloud/bigquery/dbapi/connection.py
+++ b/google/cloud/bigquery/dbapi/connection.py
@@ -76,7 +76,8 @@
self._bqstorage_client._transport.grpc_channel.close()
for cursor_ in self._cursors_created:
- cursor_.close()
+ if not cursor_._closed:
+ cursor_.close()
def commit(self):
"""No-op, but for consistency raise an error if connection is closed."""
|
{"golden_diff": "diff --git a/google/cloud/bigquery/dbapi/connection.py b/google/cloud/bigquery/dbapi/connection.py\n--- a/google/cloud/bigquery/dbapi/connection.py\n+++ b/google/cloud/bigquery/dbapi/connection.py\n@@ -76,7 +76,8 @@\n self._bqstorage_client._transport.grpc_channel.close()\n \n for cursor_ in self._cursors_created:\n- cursor_.close()\n+ if not cursor_._closed:\n+ cursor_.close()\n \n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n", "issue": "Closing a connection tries to close already closed cursors\nWhen closing a connection the library will call `.close()` on every cursor created for that connection, including closed ones. While the code works, it produces a lot of logs of error level, polluting the logs.\r\n\r\n#### Environment details\r\n\r\n - OS type and version: Mac OS 10.15.7\r\n - Python version: Python 3.8.2\r\n - pip version: pip 20.3.3\r\n - `google-cloud-bigquery` version: 2.7.0\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Close a cursor\r\n 2. Close the connection\r\n 3. Error log shows `Exception closing connection <google.cloud.bigquery.dbapi.connection.Connection object at 0x...>`\r\n\r\n#### Code example\r\n\r\n```python\r\nfrom contextlib import closing\r\n\r\n# using pybigquery\r\nwith closing(engine.raw_connection()) as conn:\r\n with closing(conn.cursor()) as cursor:\r\n cursor.execute(sql)\r\n```\r\n\r\n#### Stack trace\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/sqlalchemy/pool/base.py\", line 270, in _close_connection\r\n self._dialect.do_close(connection)\r\n File \"/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py\", line 549, in do_close\r\n dbapi_connection.close()\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py\", line 258, in with_closed_check\r\n return method(self, *args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/connection.py\", line 79, in close\r\n cursor_.close()\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py\", line 257, in with_closed_check\r\n raise exc_class(exc_msg)\r\ngoogle.cloud.bigquery.dbapi.exceptions.ProgrammingError: Operating on a closed cursor.\r\n```\r\n\r\n#### Suggested fix\r\n\r\n```python\r\n# google/cloud/bigquery/dbapi/connection.py\r\nclass Connection(object):\r\n ...\r\n def close(self):\r\n ...\r\n for cursor_ in self._cursors_created:\r\n if not cursor_._closed:\r\n cursor_.close()\r\n```\n", "before_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Connection for the Google BigQuery DB-API.\"\"\"\n\nimport weakref\n\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery.dbapi import cursor\nfrom google.cloud.bigquery.dbapi import _helpers\n\n\n@_helpers.raise_on_closed(\"Operating on a closed connection.\")\nclass Connection(object):\n \"\"\"DB-API Connection to Google BigQuery.\n\n Args:\n client 
(Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n \"\"\"\n\n def __init__(self, client=None, bqstorage_client=None):\n if client is None:\n client = bigquery.Client()\n self._owns_client = True\n else:\n self._owns_client = False\n\n if bqstorage_client is None:\n # A warning is already raised by the factory if instantiation fails.\n bqstorage_client = client._create_bqstorage_client()\n self._owns_bqstorage_client = bqstorage_client is not None\n else:\n self._owns_bqstorage_client = False\n\n self._client = client\n self._bqstorage_client = bqstorage_client\n\n self._closed = False\n self._cursors_created = weakref.WeakSet()\n\n def close(self):\n \"\"\"Close the connection and any cursors created from it.\n\n Any BigQuery clients explicitly passed to the constructor are *not*\n closed, only those created by the connection instance itself.\n \"\"\"\n self._closed = True\n\n if self._owns_client:\n self._client.close()\n\n if self._owns_bqstorage_client:\n # There is no close() on the BQ Storage client itself.\n self._bqstorage_client._transport.grpc_channel.close()\n\n for cursor_ in self._cursors_created:\n cursor_.close()\n\n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n\n def cursor(self):\n \"\"\"Return a new cursor object.\n\n Returns:\n google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.\n \"\"\"\n new_cursor = cursor.Cursor(self)\n self._cursors_created.add(new_cursor)\n return new_cursor\n\n\ndef connect(client=None, bqstorage_client=None):\n \"\"\"Construct a DB-API connection to Google BigQuery.\n\n Args:\n client (Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n\n Returns:\n google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.\n \"\"\"\n return Connection(client, bqstorage_client)\n", "path": "google/cloud/bigquery/dbapi/connection.py"}]}
| 2,183 | 124 |
gh_patches_debug_24087
|
rasdani/github-patches
|
git_diff
|
web2py__web2py-1960
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't initiate simplejsonrpc service object
When attempting to instatiate a simplejsonrpc service using Python3, the following happens:
```
>>> from gluon.contrib.simplejsonrpc import ServerProxy
>>> URL = "http://localhost:8000/app/webservice/call/jsonrpc"
>>> service = ServerProxy(URL, verbose=False)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/opt/web2py/gluon/contrib/simplejsonrpc.py", line 92, in __init__
type, uri = urllib.splittype(uri)
AttributeError: module 'urllib' has no attribute 'splittype'
```
As far as I could see, the main problem is, among other little issues, that the function splittype has been moved to urllib.request, therefore it is not found.
</issue>
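For illustration, the incompatibility can be reproduced outside web2py: on Python 3 the old `urllib.splittype`/`urllib.splithost` helpers live in `urllib.parse` (deprecated but still importable at the time of this report), so a version-agnostic import is one minimal sketch of a workaround — an assumption-marked illustration, not the library's actual fix:

```python
import sys

# Illustrative sketch only: resolve the helpers from their Python 3 home.
if sys.version_info[0] == 2:
    from urllib import splittype, splithost
else:
    from urllib.parse import splittype, splithost  # deprecated, but present

scheme, rest = splittype("http://localhost:8000/app/webservice/call/jsonrpc")
host, handler = splithost(rest)
# scheme == "http", host == "localhost:8000", handler == "/app/webservice/call/jsonrpc"
```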
<code>
[start of gluon/contrib/simplejsonrpc.py]
1 # -*- coding: utf-8 -*-
2 # This program is free software; you can redistribute it and/or modify
3 # it under the terms of the GNU Lesser General Public License as published by the
4 # Free Software Foundation; either version 3, or (at your option) any later
5 # version.
6 #
7 # This program is distributed in the hope that it will be useful, but
8 # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
9 # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
10 # for more details.
11
12 "Pythonic simple JSON RPC Client implementation"
13 from __future__ import print_function
14
15 __author__ = "Mariano Reingart ([email protected])"
16 __copyright__ = "Copyright (C) 2011 Mariano Reingart"
17 __license__ = "LGPL 3.0"
18 __version__ = "0.05"
19
20 import sys
21 PY2 = sys.version_info[0] == 2
22
23 import urllib
24 if PY2:
25 from xmlrpclib import Transport, SafeTransport
26 from cStringIO import StringIO
27 else:
28 from xmlrpc.client import Transport, SafeTransport
29 from io import StringIO
30 import random
31 import json
32
33
34 class JSONRPCError(RuntimeError):
35 "Error object for remote procedure call fail"
36 def __init__(self, code, message, data=''):
37 value = "%s: %s\n%s" % (code, message, '\n'.join(data))
38 RuntimeError.__init__(self, value)
39 self.code = code
40 self.message = message
41 self.data = data
42
43
44 class JSONDummyParser:
45 "json wrapper for xmlrpclib parser interfase"
46 def __init__(self):
47 self.buf = StringIO()
48
49 def feed(self, data):
50 self.buf.write(data)
51
52 def close(self):
53 return self.buf.getvalue()
54
55
56 class JSONTransportMixin:
57 "json wrapper for xmlrpclib transport interfase"
58
59 def send_content(self, connection, request_body):
60 connection.putheader("Content-Type", "application/json")
61 connection.putheader("Content-Length", str(len(request_body)))
62 connection.endheaders()
63 if request_body:
64 connection.send(request_body)
65 # todo: add gzip compression
66
67 def getparser(self):
68 # get parser and unmarshaller
69 parser = JSONDummyParser()
70 return parser, parser
71
72
73 class JSONTransport(JSONTransportMixin, Transport):
74 pass
75
76
77 class JSONSafeTransport(JSONTransportMixin, SafeTransport):
78 pass
79
80
81 class ServerProxy(object):
82 "JSON RPC Simple Client Service Proxy"
83
84 def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):
85 self.location = uri # server location (url)
86 self.trace = verbose # show debug messages
87 self.exceptions = True # raise errors? (JSONRPCError)
88 self.timeout = None
89 self.json_request = self.json_response = ''
90 self.version = version # '2.0' for jsonrpc2
91
92 type, uri = urllib.splittype(uri)
93 if type not in ("http", "https"):
94 raise IOError("unsupported JSON-RPC protocol")
95 self.__host, self.__handler = urllib.splithost(uri)
96
97 if transport is None:
98 if type == "https":
99 transport = JSONSafeTransport()
100 else:
101 transport = JSONTransport()
102 self.__transport = transport
103 self.__encoding = encoding
104 self.__verbose = verbose
105
106 def __getattr__(self, attr):
107 "pseudo method that can be called"
108 return lambda *args, **vars: self.call(attr, *args, **vars)
109
110 def call(self, method, *args, **vars):
111 "JSON RPC communication (method invocation)"
112
113 # build data sent to the service
114 request_id = random.randint(0, sys.maxsize)
115 data = {'id': request_id, 'method': method, 'params': args or vars, }
116 if self.version:
117 data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600
118 request = json.dumps(data)
119
120 # make HTTP request (retry if connection is lost)
121 response = self.__transport.request(
122 self.__host,
123 self.__handler,
124 request,
125 verbose=self.__verbose
126 )
127
128 # store plain request and response for further debugging
129 self.json_request = request
130 self.json_response = response
131
132 # parse json data coming from service
133 # {'version': '1.1', 'id': id, 'result': result, 'error': None}
134 response = json.loads(response)
135
136 self.error = response.get('error', {})
137 if self.error and self.exceptions:
138 raise JSONRPCError(self.error.get('code', 0),
139 self.error.get('message', ''),
140 self.error.get('data', None))
141 if response['id'] != request_id:
142 raise JSONRPCError(0, "JSON Request ID != Response ID")
143
144 return response.get('result')
145
146
147 ServiceProxy = ServerProxy
148
149
150 if __name__ == "__main__":
151 # basic tests:
152 location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc"
153 client = ServerProxy(location, verbose='--verbose' in sys.argv,)
154 print(client.add(1, 2))
155
[end of gluon/contrib/simplejsonrpc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gluon/contrib/simplejsonrpc.py b/gluon/contrib/simplejsonrpc.py
--- a/gluon/contrib/simplejsonrpc.py
+++ b/gluon/contrib/simplejsonrpc.py
@@ -20,11 +20,12 @@
import sys
PY2 = sys.version_info[0] == 2
-import urllib
if PY2:
+ import urllib
from xmlrpclib import Transport, SafeTransport
from cStringIO import StringIO
else:
+ import urllib.request as urllib
from xmlrpc.client import Transport, SafeTransport
from io import StringIO
import random
@@ -47,7 +48,7 @@
self.buf = StringIO()
def feed(self, data):
- self.buf.write(data)
+ self.buf.write(data.decode('utf-8'))
def close(self):
return self.buf.getvalue()
@@ -61,7 +62,7 @@
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders()
if request_body:
- connection.send(request_body)
+ connection.send(str.encode(request_body))
# todo: add gzip compression
def getparser(self):
|
{"golden_diff": "diff --git a/gluon/contrib/simplejsonrpc.py b/gluon/contrib/simplejsonrpc.py\n--- a/gluon/contrib/simplejsonrpc.py\n+++ b/gluon/contrib/simplejsonrpc.py\n@@ -20,11 +20,12 @@\n import sys\n PY2 = sys.version_info[0] == 2\n \n-import urllib\n if PY2:\n+ import urllib\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\n else:\n+ import urllib.request as urllib\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\n import random\n@@ -47,7 +48,7 @@\n self.buf = StringIO()\n \n def feed(self, data):\n- self.buf.write(data)\n+ self.buf.write(data.decode('utf-8'))\n \n def close(self):\n return self.buf.getvalue()\n@@ -61,7 +62,7 @@\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n- connection.send(request_body)\n+ connection.send(str.encode(request_body))\n # todo: add gzip compression\n \n def getparser(self):\n", "issue": "Can't initiate simplejsonrpc service object\nWhen attempting to instatiate a simplejsonrpc service using Python3, the following happens:\r\n```\r\n>>> from gluon.contrib.simplejsonrpc import ServerProxy\r\n>>> URL = \"http://localhost:8000/app/webservice/call/jsonrpc\"\r\n>>> service = ServerProxy(URL, verbose=False)\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/opt/web2py/gluon/contrib/simplejsonrpc.py\", line 92, in __init__\r\n type, uri = urllib.splittype(uri)\r\nAttributeError: module 'urllib' has no attribute 'splittype'\r\n```\r\nAs far as I could see, the main problem is, among other little issues, that the function splittype has been moved to urllib.request, therefore it is not found.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation; either version 3, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License\n# for more details.\n\n\"Pythonic simple JSON RPC Client implementation\"\nfrom __future__ import print_function\n\n__author__ = \"Mariano Reingart ([email protected])\"\n__copyright__ = \"Copyright (C) 2011 Mariano Reingart\"\n__license__ = \"LGPL 3.0\"\n__version__ = \"0.05\"\n\nimport sys\nPY2 = sys.version_info[0] == 2\n\nimport urllib\nif PY2:\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\nelse:\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\nimport random\nimport json\n\n\nclass JSONRPCError(RuntimeError):\n \"Error object for remote procedure call fail\"\n def __init__(self, code, message, data=''):\n value = \"%s: %s\\n%s\" % (code, message, '\\n'.join(data))\n RuntimeError.__init__(self, value)\n self.code = code\n self.message = message\n self.data = data\n\n\nclass JSONDummyParser:\n \"json wrapper for xmlrpclib parser interfase\"\n def __init__(self):\n self.buf = StringIO()\n\n def feed(self, data):\n self.buf.write(data)\n\n def close(self):\n return self.buf.getvalue()\n\n\nclass JSONTransportMixin:\n \"json wrapper for xmlrpclib transport interfase\"\n\n def send_content(self, connection, request_body):\n connection.putheader(\"Content-Type\", \"application/json\")\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n connection.send(request_body)\n # todo: add gzip compression\n\n def getparser(self):\n # get parser and unmarshaller\n parser = JSONDummyParser()\n return parser, parser\n\n\nclass JSONTransport(JSONTransportMixin, Transport):\n pass\n\n\nclass JSONSafeTransport(JSONTransportMixin, SafeTransport):\n pass\n\n\nclass ServerProxy(object):\n \"JSON RPC Simple Client Service Proxy\"\n\n def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):\n self.location = uri # server location (url)\n self.trace = verbose # show debug messages\n self.exceptions = True # raise errors? 
(JSONRPCError)\n self.timeout = None\n self.json_request = self.json_response = ''\n self.version = version # '2.0' for jsonrpc2\n\n type, uri = urllib.splittype(uri)\n if type not in (\"http\", \"https\"):\n raise IOError(\"unsupported JSON-RPC protocol\")\n self.__host, self.__handler = urllib.splithost(uri)\n\n if transport is None:\n if type == \"https\":\n transport = JSONSafeTransport()\n else:\n transport = JSONTransport()\n self.__transport = transport\n self.__encoding = encoding\n self.__verbose = verbose\n\n def __getattr__(self, attr):\n \"pseudo method that can be called\"\n return lambda *args, **vars: self.call(attr, *args, **vars)\n\n def call(self, method, *args, **vars):\n \"JSON RPC communication (method invocation)\"\n\n # build data sent to the service\n request_id = random.randint(0, sys.maxsize)\n data = {'id': request_id, 'method': method, 'params': args or vars, }\n if self.version:\n data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600\n request = json.dumps(data)\n\n # make HTTP request (retry if connection is lost)\n response = self.__transport.request(\n self.__host,\n self.__handler,\n request,\n verbose=self.__verbose\n )\n\n # store plain request and response for further debugging\n self.json_request = request\n self.json_response = response\n\n # parse json data coming from service\n # {'version': '1.1', 'id': id, 'result': result, 'error': None}\n response = json.loads(response)\n\n self.error = response.get('error', {})\n if self.error and self.exceptions:\n raise JSONRPCError(self.error.get('code', 0),\n self.error.get('message', ''),\n self.error.get('data', None))\n if response['id'] != request_id:\n raise JSONRPCError(0, \"JSON Request ID != Response ID\")\n\n return response.get('result')\n\n\nServiceProxy = ServerProxy\n\n\nif __name__ == \"__main__\":\n # basic tests:\n location = \"http://www.web2py.com.ar/webservices/sample/call/jsonrpc\"\n client = ServerProxy(location, verbose='--verbose' in sys.argv,)\n print(client.add(1, 2))\n", "path": "gluon/contrib/simplejsonrpc.py"}]}
| 2,241 | 262 |
gh_patches_debug_9643
|
rasdani/github-patches
|
git_diff
|
aio-libs__aiohttp-3819
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TooManyRedirects is not properly exposed publicly
## Description
https://github.com/aio-libs/aiohttp/commit/544716cf93b3856e485c8e993de60d9911c002cb (#2943) added `TooManyRedirects` and added it as `aiohttp.TooManyRedirects` [in the documentation](https://docs.aiohttp.org/en/latest/client_reference.html#aiohttp.TooManyRedirects).
However, it's not properly exposed publicly in `__init__.py` and can only be referenced as `aiohttp.client_exceptions.TooManyRedirects`.
## Expected Behaviour
`aiohttp.TooManyRedirects` works, by reference to `aiohttp.client_exceptions.TooManyRedirects`
## Actual Behaviour
`AttributeError: module 'aiohttp' has no attribute 'TooManyRedirects`
## Reproduction Steps
`import aiohttp` and attempt to use `aiohttp.TooManyRedirects`
## Environment
aiohttp 3.5.4
Also reproducible on latest master (https://github.com/aio-libs/aiohttp/commit/81cc715227f6b0f3b36ee6d8bcc95a63a37fbdab)
</issue>
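For context on where this surfaces, the exception is raised when a request exceeds its `max_redirects` budget, so the documented public spelling would typically be used as in the hedged sketch below (which only works once the name is actually re-exported from the package root):

```python
import asyncio
import aiohttp

async def fetch(url: str) -> str:
    async with aiohttp.ClientSession() as session:
        try:
            async with session.get(url, max_redirects=3) as resp:
                return await resp.text()
        except aiohttp.TooManyRedirects:
            # Requires the re-export; otherwise the class is only reachable
            # as aiohttp.client_exceptions.TooManyRedirects.
            return ""

asyncio.run(fetch("http://example.com/"))
```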
<code>
[start of aiohttp/__init__.py]
1 __version__ = '4.0.0a0'
2
3 from typing import Tuple # noqa
4
5 from . import hdrs
6 from .client import (
7 BaseConnector,
8 ClientConnectionError,
9 ClientConnectorCertificateError,
10 ClientConnectorError,
11 ClientConnectorSSLError,
12 ClientError,
13 ClientHttpProxyError,
14 ClientOSError,
15 ClientPayloadError,
16 ClientProxyConnectionError,
17 ClientRequest,
18 ClientResponse,
19 ClientResponseError,
20 ClientSession,
21 ClientSSLError,
22 ClientTimeout,
23 ClientWebSocketResponse,
24 ContentTypeError,
25 Fingerprint,
26 InvalidURL,
27 RequestInfo,
28 ServerConnectionError,
29 ServerDisconnectedError,
30 ServerFingerprintMismatch,
31 ServerTimeoutError,
32 TCPConnector,
33 UnixConnector,
34 WSServerHandshakeError,
35 request,
36 )
37 from .cookiejar import CookieJar, DummyCookieJar
38 from .formdata import FormData
39 from .helpers import BasicAuth, ChainMapProxy
40 from .http import (
41 HttpVersion,
42 HttpVersion10,
43 HttpVersion11,
44 WebSocketError,
45 WSCloseCode,
46 WSMessage,
47 WSMsgType,
48 )
49 from .multipart import (
50 BadContentDispositionHeader,
51 BadContentDispositionParam,
52 BodyPartReader,
53 MultipartReader,
54 MultipartWriter,
55 content_disposition_filename,
56 parse_content_disposition,
57 )
58 from .payload import (
59 PAYLOAD_REGISTRY,
60 AsyncIterablePayload,
61 BufferedReaderPayload,
62 BytesIOPayload,
63 BytesPayload,
64 IOBasePayload,
65 JsonPayload,
66 Payload,
67 StringIOPayload,
68 StringPayload,
69 TextIOPayload,
70 get_payload,
71 payload_type,
72 )
73 from .resolver import AsyncResolver, DefaultResolver, ThreadedResolver
74 from .signals import Signal
75 from .streams import (
76 EMPTY_PAYLOAD,
77 DataQueue,
78 EofStream,
79 FlowControlDataQueue,
80 StreamReader,
81 )
82 from .tracing import (
83 TraceConfig,
84 TraceConnectionCreateEndParams,
85 TraceConnectionCreateStartParams,
86 TraceConnectionQueuedEndParams,
87 TraceConnectionQueuedStartParams,
88 TraceConnectionReuseconnParams,
89 TraceDnsCacheHitParams,
90 TraceDnsCacheMissParams,
91 TraceDnsResolveHostEndParams,
92 TraceDnsResolveHostStartParams,
93 TraceRequestChunkSentParams,
94 TraceRequestEndParams,
95 TraceRequestExceptionParams,
96 TraceRequestRedirectParams,
97 TraceRequestStartParams,
98 TraceResponseChunkReceivedParams,
99 )
100
101 __all__ = (
102 'hdrs',
103 # client
104 'BaseConnector',
105 'ClientConnectionError',
106 'ClientConnectorCertificateError',
107 'ClientConnectorError',
108 'ClientConnectorSSLError',
109 'ClientError',
110 'ClientHttpProxyError',
111 'ClientOSError',
112 'ClientPayloadError',
113 'ClientProxyConnectionError',
114 'ClientResponse',
115 'ClientRequest',
116 'ClientResponseError',
117 'ClientSSLError',
118 'ClientSession',
119 'ClientTimeout',
120 'ClientWebSocketResponse',
121 'ContentTypeError',
122 'Fingerprint',
123 'InvalidURL',
124 'RequestInfo',
125 'ServerConnectionError',
126 'ServerDisconnectedError',
127 'ServerFingerprintMismatch',
128 'ServerTimeoutError',
129 'TCPConnector',
130 'UnixConnector',
131 'WSServerHandshakeError',
132 'request',
133 # cookiejar
134 'CookieJar',
135 'DummyCookieJar',
136 # formdata
137 'FormData',
138 # helpers
139 'BasicAuth',
140 'ChainMapProxy',
141 # http
142 'HttpVersion',
143 'HttpVersion10',
144 'HttpVersion11',
145 'WSMsgType',
146 'WSCloseCode',
147 'WSMessage',
148 'WebSocketError',
149 # multipart
150 'BadContentDispositionHeader',
151 'BadContentDispositionParam',
152 'BodyPartReader',
153 'MultipartReader',
154 'MultipartWriter',
155 'content_disposition_filename',
156 'parse_content_disposition',
157 # payload
158 'AsyncIterablePayload',
159 'BufferedReaderPayload',
160 'BytesIOPayload',
161 'BytesPayload',
162 'IOBasePayload',
163 'JsonPayload',
164 'PAYLOAD_REGISTRY',
165 'Payload',
166 'StringIOPayload',
167 'StringPayload',
168 'TextIOPayload',
169 'get_payload',
170 'payload_type',
171 # resolver
172 'AsyncResolver',
173 'DefaultResolver',
174 'ThreadedResolver',
175 # signals
176 'Signal',
177 'DataQueue',
178 'EMPTY_PAYLOAD',
179 'EofStream',
180 'FlowControlDataQueue',
181 'StreamReader',
182 # tracing
183 'TraceConfig',
184 'TraceConnectionCreateEndParams',
185 'TraceConnectionCreateStartParams',
186 'TraceConnectionQueuedEndParams',
187 'TraceConnectionQueuedStartParams',
188 'TraceConnectionReuseconnParams',
189 'TraceDnsCacheHitParams',
190 'TraceDnsCacheMissParams',
191 'TraceDnsResolveHostEndParams',
192 'TraceDnsResolveHostStartParams',
193 'TraceRequestChunkSentParams',
194 'TraceRequestEndParams',
195 'TraceRequestExceptionParams',
196 'TraceRequestRedirectParams',
197 'TraceRequestStartParams',
198 'TraceResponseChunkReceivedParams',
199 ) # type: Tuple[str, ...]
200
201 try:
202 from .worker import GunicornWebWorker, GunicornUVLoopWebWorker # noqa
203 __all__ += ('GunicornWebWorker', 'GunicornUVLoopWebWorker')
204 except ImportError: # pragma: no cover
205 pass
206
[end of aiohttp/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/aiohttp/__init__.py b/aiohttp/__init__.py
--- a/aiohttp/__init__.py
+++ b/aiohttp/__init__.py
@@ -30,6 +30,7 @@
ServerFingerprintMismatch,
ServerTimeoutError,
TCPConnector,
+ TooManyRedirects,
UnixConnector,
WSServerHandshakeError,
request,
@@ -127,6 +128,7 @@
'ServerFingerprintMismatch',
'ServerTimeoutError',
'TCPConnector',
+ 'TooManyRedirects',
'UnixConnector',
'WSServerHandshakeError',
'request',
|
{"golden_diff": "diff --git a/aiohttp/__init__.py b/aiohttp/__init__.py\n--- a/aiohttp/__init__.py\n+++ b/aiohttp/__init__.py\n@@ -30,6 +30,7 @@\n ServerFingerprintMismatch,\n ServerTimeoutError,\n TCPConnector,\n+ TooManyRedirects,\n UnixConnector,\n WSServerHandshakeError,\n request,\n@@ -127,6 +128,7 @@\n 'ServerFingerprintMismatch',\n 'ServerTimeoutError',\n 'TCPConnector',\n+ 'TooManyRedirects',\n 'UnixConnector',\n 'WSServerHandshakeError',\n 'request',\n", "issue": "TooManyRedirects is not properly exposed publicly\n## Description\r\n\r\nhttps://github.com/aio-libs/aiohttp/commit/544716cf93b3856e485c8e993de60d9911c002cb (#2943) added `TooManyRedirects` and added it as `aiohttp.TooManyRedirects` [in the documentation](https://docs.aiohttp.org/en/latest/client_reference.html#aiohttp.TooManyRedirects).\r\nHowever, it's not properly exposed publicly in `__init__.py` and can only be referenced as `aiohttp.client_exceptions.TooManyRedirects`.\r\n\r\n## Expected Behaviour\r\n\r\n`aiohttp.TooManyRedirects` works, by reference to `aiohttp.client_exceptions.TooManyRedirects`\r\n\r\n## Actual Behaviour\r\n\r\n`AttributeError: module 'aiohttp' has no attribute 'TooManyRedirects`\r\n\r\n## Reproduction Steps\r\n\r\n`import aiohttp` and attempt to use `aiohttp.TooManyRedirects`\r\n\r\n## Environment\r\n\r\naiohttp 3.5.4\r\nAlso reproducible on latest master (https://github.com/aio-libs/aiohttp/commit/81cc715227f6b0f3b36ee6d8bcc95a63a37fbdab)\n", "before_files": [{"content": "__version__ = '4.0.0a0'\n\nfrom typing import Tuple # noqa\n\nfrom . import hdrs\nfrom .client import (\n BaseConnector,\n ClientConnectionError,\n ClientConnectorCertificateError,\n ClientConnectorError,\n ClientConnectorSSLError,\n ClientError,\n ClientHttpProxyError,\n ClientOSError,\n ClientPayloadError,\n ClientProxyConnectionError,\n ClientRequest,\n ClientResponse,\n ClientResponseError,\n ClientSession,\n ClientSSLError,\n ClientTimeout,\n ClientWebSocketResponse,\n ContentTypeError,\n Fingerprint,\n InvalidURL,\n RequestInfo,\n ServerConnectionError,\n ServerDisconnectedError,\n ServerFingerprintMismatch,\n ServerTimeoutError,\n TCPConnector,\n UnixConnector,\n WSServerHandshakeError,\n request,\n)\nfrom .cookiejar import CookieJar, DummyCookieJar\nfrom .formdata import FormData\nfrom .helpers import BasicAuth, ChainMapProxy\nfrom .http import (\n HttpVersion,\n HttpVersion10,\n HttpVersion11,\n WebSocketError,\n WSCloseCode,\n WSMessage,\n WSMsgType,\n)\nfrom .multipart import (\n BadContentDispositionHeader,\n BadContentDispositionParam,\n BodyPartReader,\n MultipartReader,\n MultipartWriter,\n content_disposition_filename,\n parse_content_disposition,\n)\nfrom .payload import (\n PAYLOAD_REGISTRY,\n AsyncIterablePayload,\n BufferedReaderPayload,\n BytesIOPayload,\n BytesPayload,\n IOBasePayload,\n JsonPayload,\n Payload,\n StringIOPayload,\n StringPayload,\n TextIOPayload,\n get_payload,\n payload_type,\n)\nfrom .resolver import AsyncResolver, DefaultResolver, ThreadedResolver\nfrom .signals import Signal\nfrom .streams import (\n EMPTY_PAYLOAD,\n DataQueue,\n EofStream,\n FlowControlDataQueue,\n StreamReader,\n)\nfrom .tracing import (\n TraceConfig,\n TraceConnectionCreateEndParams,\n TraceConnectionCreateStartParams,\n TraceConnectionQueuedEndParams,\n TraceConnectionQueuedStartParams,\n TraceConnectionReuseconnParams,\n TraceDnsCacheHitParams,\n TraceDnsCacheMissParams,\n TraceDnsResolveHostEndParams,\n TraceDnsResolveHostStartParams,\n TraceRequestChunkSentParams,\n TraceRequestEndParams,\n 
TraceRequestExceptionParams,\n TraceRequestRedirectParams,\n TraceRequestStartParams,\n TraceResponseChunkReceivedParams,\n)\n\n__all__ = (\n 'hdrs',\n # client\n 'BaseConnector',\n 'ClientConnectionError',\n 'ClientConnectorCertificateError',\n 'ClientConnectorError',\n 'ClientConnectorSSLError',\n 'ClientError',\n 'ClientHttpProxyError',\n 'ClientOSError',\n 'ClientPayloadError',\n 'ClientProxyConnectionError',\n 'ClientResponse',\n 'ClientRequest',\n 'ClientResponseError',\n 'ClientSSLError',\n 'ClientSession',\n 'ClientTimeout',\n 'ClientWebSocketResponse',\n 'ContentTypeError',\n 'Fingerprint',\n 'InvalidURL',\n 'RequestInfo',\n 'ServerConnectionError',\n 'ServerDisconnectedError',\n 'ServerFingerprintMismatch',\n 'ServerTimeoutError',\n 'TCPConnector',\n 'UnixConnector',\n 'WSServerHandshakeError',\n 'request',\n # cookiejar\n 'CookieJar',\n 'DummyCookieJar',\n # formdata\n 'FormData',\n # helpers\n 'BasicAuth',\n 'ChainMapProxy',\n # http\n 'HttpVersion',\n 'HttpVersion10',\n 'HttpVersion11',\n 'WSMsgType',\n 'WSCloseCode',\n 'WSMessage',\n 'WebSocketError',\n # multipart\n 'BadContentDispositionHeader',\n 'BadContentDispositionParam',\n 'BodyPartReader',\n 'MultipartReader',\n 'MultipartWriter',\n 'content_disposition_filename',\n 'parse_content_disposition',\n # payload\n 'AsyncIterablePayload',\n 'BufferedReaderPayload',\n 'BytesIOPayload',\n 'BytesPayload',\n 'IOBasePayload',\n 'JsonPayload',\n 'PAYLOAD_REGISTRY',\n 'Payload',\n 'StringIOPayload',\n 'StringPayload',\n 'TextIOPayload',\n 'get_payload',\n 'payload_type',\n # resolver\n 'AsyncResolver',\n 'DefaultResolver',\n 'ThreadedResolver',\n # signals\n 'Signal',\n 'DataQueue',\n 'EMPTY_PAYLOAD',\n 'EofStream',\n 'FlowControlDataQueue',\n 'StreamReader',\n # tracing\n 'TraceConfig',\n 'TraceConnectionCreateEndParams',\n 'TraceConnectionCreateStartParams',\n 'TraceConnectionQueuedEndParams',\n 'TraceConnectionQueuedStartParams',\n 'TraceConnectionReuseconnParams',\n 'TraceDnsCacheHitParams',\n 'TraceDnsCacheMissParams',\n 'TraceDnsResolveHostEndParams',\n 'TraceDnsResolveHostStartParams',\n 'TraceRequestChunkSentParams',\n 'TraceRequestEndParams',\n 'TraceRequestExceptionParams',\n 'TraceRequestRedirectParams',\n 'TraceRequestStartParams',\n 'TraceResponseChunkReceivedParams',\n) # type: Tuple[str, ...]\n\ntry:\n from .worker import GunicornWebWorker, GunicornUVLoopWebWorker # noqa\n __all__ += ('GunicornWebWorker', 'GunicornUVLoopWebWorker')\nexcept ImportError: # pragma: no cover\n pass\n", "path": "aiohttp/__init__.py"}]}
| 2,521 | 147 |
gh_patches_debug_13016
|
rasdani/github-patches
|
git_diff
|
PrefectHQ__prefect-3847
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AzureResult read function not working
## Description
<!-- A clear description of the bug -->
Using `AzureResult` to read a file, an exception is raised:
> TypeError: a bytes-like object is required, not 'StorageStreamDownloader'
Looking at the `read` function in `src/prefect/engine/results/azure_result.py` I see the following:
```python
content_string = client.download_blob()
try:
new.value = new.serializer.deserialize(content_string)
```
`client.download_blob()` returns a `StorageStreamDownloader`, while `new.serailizer.deserialize(content_string)` expects a byte array.
## Expected Behavior
I would expect that the file would be read without raising an exception.
## Reproduction
```python
from prefect.engine.results import AzureResult
con_string = ""
azure_result = AzureResult(container="data", connection_string=con_string)
w = azure_result.write({"Test": "123"})
r = azure_result.read(w.location)
```
## Environment
```json
{
"config_overrides": {
"context": {
"secrets": false
}
},
"env_vars": [],
"system_information": {
"platform": "Windows-10-10.0.18362-SP0",
"prefect_backend": "server",
"prefect_version": "0.13.19",
"python_version": "3.9.0"
}
}
```
</issue>
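The mismatch is easy to demonstrate in isolation with the v12 `azure-storage-blob` SDK; the snippet below is an assumption-marked sketch against that SDK, not Prefect code, and the connection string is a placeholder:

```python
from azure.storage.blob import BlobServiceClient

service = BlobServiceClient.from_connection_string(conn_str="<connection string>")
client = service.get_blob_client(container="data", blob="prefect-result")

downloader = client.download_blob()       # StorageStreamDownloader, not bytes
content = downloader.content_as_bytes()   # or downloader.readall() on newer SDKs
# `content` is a bytes object, which is what a serializer's deserialize expects.
```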
<code>
[start of src/prefect/engine/results/azure_result.py]
1 import os
2 from typing import TYPE_CHECKING, Any
3
4 from prefect.client import Secret
5 from prefect.engine.result import Result
6
7 if TYPE_CHECKING:
8 import azure.storage.blob
9
10
11 class AzureResult(Result):
12 """
13 Result for writing to and reading from an Azure Blob storage.
14
15 Note that your flow's runtime environment must be able to authenticate with
16 Azure; there are currently two supported options: provide a connection string
17 either at initialization or at runtime through an environment variable, or
18 set your Azure connection string as a Prefect Secret. Using an environment
19 variable is the recommended approach.
20
21 Args:
22 - container (str): the name of the container to write to / read from
23 - connection_string (str, optional): an Azure connection string for communicating with
24 Blob storage. If not provided the value set in the environment as
25 `AZURE_STORAGE_CONNECTION_STRING` will be used
26 - connection_string_secret (str, optional): the name of a Prefect Secret
27 which stores your Azure connection tring
28 - **kwargs (Any, optional): any additional `Result` initialization options
29 """
30
31 def __init__(
32 self,
33 container: str,
34 connection_string: str = None,
35 connection_string_secret: str = None,
36 **kwargs: Any
37 ) -> None:
38 self.container = container
39 self.connection_string = connection_string or os.getenv(
40 "AZURE_STORAGE_CONNECTION_STRING"
41 )
42 self.connection_string_secret = connection_string_secret
43 super().__init__(**kwargs)
44
45 def initialize_service(self) -> None:
46 """
47 Initialize a Blob service.
48 """
49 import azure.storage.blob
50
51 connection_string = self.connection_string
52 if not connection_string and self.connection_string_secret:
53 connection_string = Secret(self.connection_string_secret).get()
54
55 self._service = azure.storage.blob.BlobServiceClient.from_connection_string(
56 conn_str=connection_string
57 )
58
59 @property
60 def service(self) -> "azure.storage.blob.BlobServiceClient":
61 if not hasattr(self, "_service"):
62 self.initialize_service()
63 return self._service
64
65 @service.setter
66 def service(self, val: Any) -> None:
67 self._service = val
68
69 def __getstate__(self) -> dict:
70 state = self.__dict__.copy()
71 if "_service" in state:
72 del state["_service"]
73 return state
74
75 def __setstate__(self, state: dict) -> None:
76 self.__dict__.update(state)
77
78 def write(self, value_: Any, **kwargs: Any) -> Result:
79 """
80 Writes the result value to a blob storage in Azure.
81
82 Args:
83 - value_ (Any): the value to write; will then be stored as the `value` attribute
84 of the returned `Result` instance
85 - **kwargs (optional): if provided, will be used to format the location template
86 to determine the location to write to
87
88 Returns:
89 - Result: a new Result instance with the appropriately formatted location
90 """
91 new = self.format(**kwargs)
92 new.value = value_
93
94 self.logger.debug("Starting to upload result to {}...".format(new.location))
95
96 # prepare data
97 binary_data = new.serializer.serialize(new.value)
98
99 # initialize client and upload
100 client = self.service.get_blob_client(
101 container=self.container, blob=new.location
102 )
103 client.upload_blob(binary_data)
104
105 self.logger.debug("Finished uploading result to {}.".format(new.location))
106
107 return new
108
109 def read(self, location: str) -> Result:
110 """
111 Reads a result from an Azure Blob container and returns a corresponding `Result` instance.
112
113 Args:
114 - location (str): the Azure blob location to read from
115
116 Returns:
117 - Result: the read result
118 """
119 new = self.copy()
120 new.location = location
121
122 try:
123 self.logger.debug("Starting to download result from {}...".format(location))
124
125 # initialize client and download
126 client = self.service.get_blob_client(
127 container=self.container, blob=location
128 )
129 content_string = client.download_blob()
130
131 try:
132 new.value = new.serializer.deserialize(content_string)
133 except EOFError:
134 new.value = None
135 self.logger.debug("Finished downloading result from {}.".format(location))
136 except Exception as exc:
137 self.logger.exception(
138 "Unexpected error while reading from result handler: {}".format(
139 repr(exc)
140 )
141 )
142 raise exc
143 return new
144
145 def exists(self, location: str, **kwargs: Any) -> bool:
146 """
147 Checks whether the target result exists.
148
149 Does not validate whether the result is `valid`, only that it is present.
150
151 Args:
152 - location (str): Location of the result in the specific result target.
153 Will check whether the provided location exists
154 - **kwargs (Any): string format arguments for `location`
155
156 Returns:
157 - bool: whether or not the target result exists.
158 """
159 from azure.core.exceptions import ResourceNotFoundError
160
161 # initialize client and download
162 client = self.service.get_blob_client(
163 container=self.container, blob=location.format(**kwargs)
164 )
165
166 # Catch exception because Azure python bindings do not yet have an exists method
167 # https://github.com/Azure/azure-sdk-for-python/issues/9507
168 try:
169 client.get_blob_properties()
170 return True
171 except ResourceNotFoundError:
172 return False
173
[end of src/prefect/engine/results/azure_result.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/prefect/engine/results/azure_result.py b/src/prefect/engine/results/azure_result.py
--- a/src/prefect/engine/results/azure_result.py
+++ b/src/prefect/engine/results/azure_result.py
@@ -126,10 +126,10 @@
client = self.service.get_blob_client(
container=self.container, blob=location
)
- content_string = client.download_blob()
+ content_bytes = client.download_blob().content_as_bytes()
try:
- new.value = new.serializer.deserialize(content_string)
+ new.value = new.serializer.deserialize(content_bytes)
except EOFError:
new.value = None
self.logger.debug("Finished downloading result from {}.".format(location))
|
{"golden_diff": "diff --git a/src/prefect/engine/results/azure_result.py b/src/prefect/engine/results/azure_result.py\n--- a/src/prefect/engine/results/azure_result.py\n+++ b/src/prefect/engine/results/azure_result.py\n@@ -126,10 +126,10 @@\n client = self.service.get_blob_client(\n container=self.container, blob=location\n )\n- content_string = client.download_blob()\n+ content_bytes = client.download_blob().content_as_bytes()\n \n try:\n- new.value = new.serializer.deserialize(content_string)\n+ new.value = new.serializer.deserialize(content_bytes)\n except EOFError:\n new.value = None\n self.logger.debug(\"Finished downloading result from {}.\".format(location))\n", "issue": "AzureResult read function not working\n## Description\r\n<!-- A clear description of the bug -->\r\nUsing `AzureResult` to read a file, an exception is raised:\r\n\r\n> TypeError: a bytes-like object is required, not 'StorageStreamDownloader'\r\n\r\nLooking at the `read` function in `src/prefect/engine/results/azure_result.py` I see the following:\r\n\r\n```python\r\ncontent_string = client.download_blob()\r\n\r\ntry:\r\n new.value = new.serializer.deserialize(content_string)\r\n```\r\n\r\n`client.download_blob()` returns a `StorageStreamDownloader`, while `new.serailizer.deserialize(content_string)` expects a byte array.\r\n\r\n## Expected Behavior\r\nI would expect that the file would be read without raising an exception.\r\n\r\n## Reproduction\r\n```python\r\nfrom prefect.engine.results import AzureResult\r\n\r\ncon_string = \"\"\r\nazure_result = AzureResult(container=\"data\", connection_string=con_string)\r\n\r\nw = azure_result.write({\"Test\": \"123\"})\r\nr = azure_result.read(w.location)\r\n```\r\n\r\n\r\n## Environment\r\n```json\r\n{\r\n \"config_overrides\": {\r\n \"context\": {\r\n \"secrets\": false\r\n }\r\n },\r\n \"env_vars\": [],\r\n \"system_information\": {\r\n \"platform\": \"Windows-10-10.0.18362-SP0\",\r\n \"prefect_backend\": \"server\",\r\n \"prefect_version\": \"0.13.19\",\r\n \"python_version\": \"3.9.0\"\r\n }\r\n}\r\n```\n", "before_files": [{"content": "import os\nfrom typing import TYPE_CHECKING, Any\n\nfrom prefect.client import Secret\nfrom prefect.engine.result import Result\n\nif TYPE_CHECKING:\n import azure.storage.blob\n\n\nclass AzureResult(Result):\n \"\"\"\n Result for writing to and reading from an Azure Blob storage.\n\n Note that your flow's runtime environment must be able to authenticate with\n Azure; there are currently two supported options: provide a connection string\n either at initialization or at runtime through an environment variable, or\n set your Azure connection string as a Prefect Secret. Using an environment\n variable is the recommended approach.\n\n Args:\n - container (str): the name of the container to write to / read from\n - connection_string (str, optional): an Azure connection string for communicating with\n Blob storage. 
If not provided the value set in the environment as\n `AZURE_STORAGE_CONNECTION_STRING` will be used\n - connection_string_secret (str, optional): the name of a Prefect Secret\n which stores your Azure connection tring\n - **kwargs (Any, optional): any additional `Result` initialization options\n \"\"\"\n\n def __init__(\n self,\n container: str,\n connection_string: str = None,\n connection_string_secret: str = None,\n **kwargs: Any\n ) -> None:\n self.container = container\n self.connection_string = connection_string or os.getenv(\n \"AZURE_STORAGE_CONNECTION_STRING\"\n )\n self.connection_string_secret = connection_string_secret\n super().__init__(**kwargs)\n\n def initialize_service(self) -> None:\n \"\"\"\n Initialize a Blob service.\n \"\"\"\n import azure.storage.blob\n\n connection_string = self.connection_string\n if not connection_string and self.connection_string_secret:\n connection_string = Secret(self.connection_string_secret).get()\n\n self._service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=connection_string\n )\n\n @property\n def service(self) -> \"azure.storage.blob.BlobServiceClient\":\n if not hasattr(self, \"_service\"):\n self.initialize_service()\n return self._service\n\n @service.setter\n def service(self, val: Any) -> None:\n self._service = val\n\n def __getstate__(self) -> dict:\n state = self.__dict__.copy()\n if \"_service\" in state:\n del state[\"_service\"]\n return state\n\n def __setstate__(self, state: dict) -> None:\n self.__dict__.update(state)\n\n def write(self, value_: Any, **kwargs: Any) -> Result:\n \"\"\"\n Writes the result value to a blob storage in Azure.\n\n Args:\n - value_ (Any): the value to write; will then be stored as the `value` attribute\n of the returned `Result` instance\n - **kwargs (optional): if provided, will be used to format the location template\n to determine the location to write to\n\n Returns:\n - Result: a new Result instance with the appropriately formatted location\n \"\"\"\n new = self.format(**kwargs)\n new.value = value_\n\n self.logger.debug(\"Starting to upload result to {}...\".format(new.location))\n\n # prepare data\n binary_data = new.serializer.serialize(new.value)\n\n # initialize client and upload\n client = self.service.get_blob_client(\n container=self.container, blob=new.location\n )\n client.upload_blob(binary_data)\n\n self.logger.debug(\"Finished uploading result to {}.\".format(new.location))\n\n return new\n\n def read(self, location: str) -> Result:\n \"\"\"\n Reads a result from an Azure Blob container and returns a corresponding `Result` instance.\n\n Args:\n - location (str): the Azure blob location to read from\n\n Returns:\n - Result: the read result\n \"\"\"\n new = self.copy()\n new.location = location\n\n try:\n self.logger.debug(\"Starting to download result from {}...\".format(location))\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location\n )\n content_string = client.download_blob()\n\n try:\n new.value = new.serializer.deserialize(content_string)\n except EOFError:\n new.value = None\n self.logger.debug(\"Finished downloading result from {}.\".format(location))\n except Exception as exc:\n self.logger.exception(\n \"Unexpected error while reading from result handler: {}\".format(\n repr(exc)\n )\n )\n raise exc\n return new\n\n def exists(self, location: str, **kwargs: Any) -> bool:\n \"\"\"\n Checks whether the target result exists.\n\n Does not validate whether the result is `valid`, 
only that it is present.\n\n Args:\n - location (str): Location of the result in the specific result target.\n Will check whether the provided location exists\n - **kwargs (Any): string format arguments for `location`\n\n Returns:\n - bool: whether or not the target result exists.\n \"\"\"\n from azure.core.exceptions import ResourceNotFoundError\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location.format(**kwargs)\n )\n\n # Catch exception because Azure python bindings do not yet have an exists method\n # https://github.com/Azure/azure-sdk-for-python/issues/9507\n try:\n client.get_blob_properties()\n return True\n except ResourceNotFoundError:\n return False\n", "path": "src/prefect/engine/results/azure_result.py"}]}
| 2,442 | 161 |
gh_patches_debug_57772
|
rasdani/github-patches
|
git_diff
|
celery__kombu-400
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django Kombu app conflicts with Django app with Django 1.7
``` python
Traceback (most recent call last):
File "manage.py", line 13, in <module>
execute_from_command_line(sys.argv)
File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 385, in execute_from_command_line
utility.execute()
File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute
django.setup()
File "/.../.env/lib/python2.7/site-packages/django/__init__.py", line 21, in setup
apps.populate(settings.INSTALLED_APPS)
File "/.../.env/lib/python2.7/site-packages/django/apps/registry.py", line 89, in populate
"duplicates: %s" % app_config.label)
django.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django
```
More info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor)
</issue>
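For background, Django 1.7's app registry requires each entry in `INSTALLED_APPS` to resolve to an `AppConfig` whose `label` is unique; a sketch of the usual explicit pattern (illustrative class name, values written out literally rather than derived from `__name__`) looks like:

```python
# Illustrative AppConfig sketch, not the shipped kombu code.
from django.apps import AppConfig


class MessageQueueConfig(AppConfig):
    # Spell out the dotted module path instead of deriving it from __name__,
    # so the label does not depend on how the module happens to be imported.
    name = 'kombu.transport.django'
    label = 'kombu_transport_django'  # must be unique across installed apps
    verbose_name = 'Message queue'
```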
<code>
[start of kombu/transport/django/__init__.py]
1 """Kombu transport using the Django database as a message store."""
2 from __future__ import absolute_import
3
4 from django.conf import settings
5 from django.core import exceptions as errors
6
7 from kombu.five import Empty
8 from kombu.transport import virtual
9 from kombu.utils.encoding import bytes_to_str
10 from kombu.utils.json import loads, dumps
11
12
13 from .models import Queue
14
15 try:
16 from django.apps import AppConfig
17 except ImportError: # pragma: no cover
18 pass
19 else:
20 class KombuAppConfig(AppConfig):
21 name = __name__
22 label = name.replace('.', '_')
23 verbose_name = 'Message queue'
24 default_app_config = 'kombu.transport.django.KombuAppConfig'
25
26 VERSION = (1, 0, 0)
27 __version__ = '.'.join(map(str, VERSION))
28
29 POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',
30 getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))
31
32
33 class Channel(virtual.Channel):
34
35 def _new_queue(self, queue, **kwargs):
36 Queue.objects.get_or_create(name=queue)
37
38 def _put(self, queue, message, **kwargs):
39 Queue.objects.publish(queue, dumps(message))
40
41 def basic_consume(self, queue, *args, **kwargs):
42 qinfo = self.state.bindings[queue]
43 exchange = qinfo[0]
44 if self.typeof(exchange).type == 'fanout':
45 return
46 super(Channel, self).basic_consume(queue, *args, **kwargs)
47
48 def _get(self, queue):
49 m = Queue.objects.fetch(queue)
50 if m:
51 return loads(bytes_to_str(m))
52 raise Empty()
53
54 def _size(self, queue):
55 return Queue.objects.size(queue)
56
57 def _purge(self, queue):
58 return Queue.objects.purge(queue)
59
60 def refresh_connection(self):
61 from django import db
62 db.close_connection()
63
64
65 class Transport(virtual.Transport):
66 Channel = Channel
67
68 default_port = 0
69 polling_interval = POLLING_INTERVAL
70 channel_errors = (
71 virtual.Transport.channel_errors + (
72 errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)
73 )
74 driver_type = 'sql'
75 driver_name = 'django'
76
77 def driver_version(self):
78 import django
79 return '.'.join(map(str, django.VERSION))
80
[end of kombu/transport/django/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py
--- a/kombu/transport/django/__init__.py
+++ b/kombu/transport/django/__init__.py
@@ -18,7 +18,7 @@
pass
else:
class KombuAppConfig(AppConfig):
- name = __name__
+ name = 'kombu.transport.django'
label = name.replace('.', '_')
verbose_name = 'Message queue'
default_app_config = 'kombu.transport.django.KombuAppConfig'
|
{"golden_diff": "diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py\n--- a/kombu/transport/django/__init__.py\n+++ b/kombu/transport/django/__init__.py\n@@ -18,7 +18,7 @@\n pass\n else:\n class KombuAppConfig(AppConfig):\n- name = __name__\n+ name = 'kombu.transport.django'\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n", "issue": "Django Kombu app conflicts with Django app with Django 1.7\n``` python\nTraceback (most recent call last):\n File \"manage.py\", line 13, in <module>\n execute_from_command_line(sys.argv)\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 385, in execute_from_command_line\n utility.execute()\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 354, in execute\n django.setup()\n File \"/.../.env/lib/python2.7/site-packages/django/__init__.py\", line 21, in setup\n apps.populate(settings.INSTALLED_APPS)\n File \"/.../.env/lib/python2.7/site-packages/django/apps/registry.py\", line 89, in populate\n \"duplicates: %s\" % app_config.label)\ndjango.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django\n```\n\nMore info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor)\n\n", "before_files": [{"content": "\"\"\"Kombu transport using the Django database as a message store.\"\"\"\nfrom __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.core import exceptions as errors\n\nfrom kombu.five import Empty\nfrom kombu.transport import virtual\nfrom kombu.utils.encoding import bytes_to_str\nfrom kombu.utils.json import loads, dumps\n\n\nfrom .models import Queue\n\ntry:\n from django.apps import AppConfig\nexcept ImportError: # pragma: no cover\n pass\nelse:\n class KombuAppConfig(AppConfig):\n name = __name__\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n\nVERSION = (1, 0, 0)\n__version__ = '.'.join(map(str, VERSION))\n\nPOLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',\n getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))\n\n\nclass Channel(virtual.Channel):\n\n def _new_queue(self, queue, **kwargs):\n Queue.objects.get_or_create(name=queue)\n\n def _put(self, queue, message, **kwargs):\n Queue.objects.publish(queue, dumps(message))\n\n def basic_consume(self, queue, *args, **kwargs):\n qinfo = self.state.bindings[queue]\n exchange = qinfo[0]\n if self.typeof(exchange).type == 'fanout':\n return\n super(Channel, self).basic_consume(queue, *args, **kwargs)\n\n def _get(self, queue):\n m = Queue.objects.fetch(queue)\n if m:\n return loads(bytes_to_str(m))\n raise Empty()\n\n def _size(self, queue):\n return Queue.objects.size(queue)\n\n def _purge(self, queue):\n return Queue.objects.purge(queue)\n\n def refresh_connection(self):\n from django import db\n db.close_connection()\n\n\nclass Transport(virtual.Transport):\n Channel = Channel\n\n default_port = 0\n polling_interval = POLLING_INTERVAL\n channel_errors = (\n virtual.Transport.channel_errors + (\n errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)\n )\n driver_type = 'sql'\n driver_name = 'django'\n\n def driver_version(self):\n import django\n return '.'.join(map(str, django.VERSION))\n", "path": "kombu/transport/django/__init__.py"}]}
| 1,460 | 136 |
gh_patches_debug_32907
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-873
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move metadata from setup.py to setup.cfg
# Description
@henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move from having PyPI metadata be stored in `setup.py` to `setup.cfg`. We've known about this for sometime but haven't taken the time to do it yet, but it is probably worth doing.
@henryiii Can you comment on how to deal with `project_urls`?
https://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86
</issue>
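On the `project_urls` question, setuptools' declarative config accepts a dict-style block under `[metadata]`, so a hedged sketch of the relevant `setup.cfg` fragment (values copied from the existing `setup.py`, everything else omitted) could look like:

```ini
[metadata]
name = pyhf
author = Lukas Heinrich, Matthew Feickert, Giordon Stark
license = Apache
url = https://github.com/scikit-hep/pyhf
project_urls =
    Documentation = https://scikit-hep.org/pyhf/
    Source = https://github.com/scikit-hep/pyhf
    Tracker = https://github.com/scikit-hep/pyhf/issues

[options]
package_dir =
    = src
packages = find:
python_requires = >=3.6

[options.packages.find]
where = src
```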
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 project_urls={
83 "Documentation": "https://scikit-hep.org/pyhf/",
84 "Source": "https://github.com/scikit-hep/pyhf",
85 "Tracker": "https://github.com/scikit-hep/pyhf/issues",
86 },
87 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
88 author_email='[email protected], [email protected], [email protected]',
89 license='Apache',
90 keywords='physics fitting numpy scipy tensorflow pytorch jax',
91 classifiers=[
92 "Development Status :: 4 - Beta",
93 "License :: OSI Approved :: Apache Software License",
94 "Intended Audience :: Science/Research",
95 "Topic :: Scientific/Engineering",
96 "Topic :: Scientific/Engineering :: Physics",
97 "Programming Language :: Python :: 3",
98 "Programming Language :: Python :: 3.6",
99 "Programming Language :: Python :: 3.7",
100 "Programming Language :: Python :: 3.8",
101 ],
102 package_dir={'': 'src'},
103 packages=find_packages(where='src'),
104 include_package_data=True,
105 python_requires=">=3.6",
106 install_requires=[
107 'scipy', # requires numpy, which is required by pyhf and tensorflow
108 'click>=6.0', # for console scripts,
109 'tqdm', # for readxml
110 'jsonschema>=3.2.0', # for utils
111 'jsonpatch',
112 'pyyaml', # for parsing CLI equal-delimited options
113 ],
114 extras_require=extras_require,
115 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
116 dependency_links=[],
117 use_scm_version=lambda: {'local_scheme': lambda version: ''},
118 )
119
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,4 @@
-from setuptools import setup, find_packages
-from pathlib import Path
-
-this_directory = Path(__file__).parent.resolve()
-with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
- long_description = readme_rst.read()
+from setuptools import setup
extras_require = {
'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
@@ -73,46 +68,6 @@
setup(
- name='pyhf',
- version='0.4.1',
- description='(partial) pure python histfactory implementation',
- long_description=long_description,
- long_description_content_type='text/x-rst',
- url='https://github.com/scikit-hep/pyhf',
- project_urls={
- "Documentation": "https://scikit-hep.org/pyhf/",
- "Source": "https://github.com/scikit-hep/pyhf",
- "Tracker": "https://github.com/scikit-hep/pyhf/issues",
- },
- author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
- author_email='[email protected], [email protected], [email protected]',
- license='Apache',
- keywords='physics fitting numpy scipy tensorflow pytorch jax',
- classifiers=[
- "Development Status :: 4 - Beta",
- "License :: OSI Approved :: Apache Software License",
- "Intended Audience :: Science/Research",
- "Topic :: Scientific/Engineering",
- "Topic :: Scientific/Engineering :: Physics",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- ],
- package_dir={'': 'src'},
- packages=find_packages(where='src'),
- include_package_data=True,
- python_requires=">=3.6",
- install_requires=[
- 'scipy', # requires numpy, which is required by pyhf and tensorflow
- 'click>=6.0', # for console scripts,
- 'tqdm', # for readxml
- 'jsonschema>=3.2.0', # for utils
- 'jsonpatch',
- 'pyyaml', # for parsing CLI equal-delimited options
- ],
extras_require=extras_require,
- entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
- dependency_links=[],
use_scm_version=lambda: {'local_scheme': lambda version: ''},
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,9 +1,4 @@\n-from setuptools import setup, find_packages\n-from pathlib import Path\n-\n-this_directory = Path(__file__).parent.resolve()\n-with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n- long_description = readme_rst.read()\n+from setuptools import setup\n \n extras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n@@ -73,46 +68,6 @@\n \n \n setup(\n- name='pyhf',\n- version='0.4.1',\n- description='(partial) pure python histfactory implementation',\n- long_description=long_description,\n- long_description_content_type='text/x-rst',\n- url='https://github.com/scikit-hep/pyhf',\n- project_urls={\n- \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n- \"Source\": \"https://github.com/scikit-hep/pyhf\",\n- \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n- },\n- author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n- author_email='[email protected], [email protected], [email protected]',\n- license='Apache',\n- keywords='physics fitting numpy scipy tensorflow pytorch jax',\n- classifiers=[\n- \"Development Status :: 4 - Beta\",\n- \"License :: OSI Approved :: Apache Software License\",\n- \"Intended Audience :: Science/Research\",\n- \"Topic :: Scientific/Engineering\",\n- \"Topic :: Scientific/Engineering :: Physics\",\n- \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.6\",\n- \"Programming Language :: Python :: 3.7\",\n- \"Programming Language :: Python :: 3.8\",\n- ],\n- package_dir={'': 'src'},\n- packages=find_packages(where='src'),\n- include_package_data=True,\n- python_requires=\">=3.6\",\n- install_requires=[\n- 'scipy', # requires numpy, which is required by pyhf and tensorflow\n- 'click>=6.0', # for console scripts,\n- 'tqdm', # for readxml\n- 'jsonschema>=3.2.0', # for utils\n- 'jsonpatch',\n- 'pyyaml', # for parsing CLI equal-delimited options\n- ],\n extras_require=extras_require,\n- entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n- dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n )\n", "issue": "Move metadata from setup.py to setup.cfg\n# Description\r\n\r\n@henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move from having PyPI metadata be stored in `setup.py` to `setup.cfg`. 
We've known about this for sometime but haven't taken the time to do it yet, but it is probably worth doing.\r\n\r\n@henryiii Can you comment on how to deal with `project_urls`?\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n project_urls={\n \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n \"Source\": \"https://github.com/scikit-hep/pyhf\",\n \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n },\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch jax',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n 
use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,913 | 617 |
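For the pyhf record above, the metadata deleted from `setup.py` by the golden diff would normally reappear as declarative setuptools configuration. The sketch below is illustrative only: it mirrors the removed keywords (including the `project_urls` mapping asked about in the issue, which `setup.cfg` accepts as one `Label = URL` pair per line), but it abbreviates the classifier and extras lists and is not the file the project actually adopted.

```ini
[metadata]
name = pyhf
version = 0.4.1
description = (partial) pure python histfactory implementation
long_description = file: README.rst
long_description_content_type = text/x-rst
url = https://github.com/scikit-hep/pyhf
author = Lukas Heinrich, Matthew Feickert, Giordon Stark
license = Apache
keywords = physics fitting numpy scipy tensorflow pytorch jax
# one "Label = URL" pair per line
project_urls =
    Documentation = https://scikit-hep.org/pyhf/
    Source = https://github.com/scikit-hep/pyhf
    Tracker = https://github.com/scikit-hep/pyhf/issues

[options]
package_dir =
    = src
packages = find:
include_package_data = True
python_requires = >=3.6
install_requires =
    scipy
    click>=6.0
    tqdm
    jsonschema>=3.2.0
    jsonpatch
    pyyaml

[options.packages.find]
where = src

[options.entry_points]
console_scripts =
    pyhf = pyhf.cli:cli
```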
gh_patches_debug_63275
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2929
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fetchmail does not fetch mails in 2.0
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [X] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: 2.0, Container-"Id": "sha256:dd0a30f2ecb74491c49c8ab1b1d8d710eb55cc1e81d411acea9171ebd40ff314",
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoi…" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
After updating from 1.9 to 2.0, fetchmail no longer fetches mail from remote POP3S servers. The container logs contain the following information at log level WARNING; log level DEBUG does not print anything more useful.
```
mailu-20-fetchmail-1 | Option --folder is not supported with POP3
mailu-20-fetchmail-1 | Option --folder is not supported with POP3
mailu-20-fetchmail-1 | Option --folder is not supported with POP3
mailu-20-fetchmail-1 | Sleeping for 1000 seconds
mailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)
mailu-20-fetchmail-1 |
mailu-20-fetchmail-1 |
mailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)
```
I copied the mailu.db and fired up the new stack based on the newly created configs and docker-compose.yml-files.
## Replication Steps
<!--
Steps for replicating your issue
-->
migrated DB from 1.9 copied to the new folder and starting the stack.
## Observed behaviour
<!--
Explain or paste the result you received.
-->
Any chance to get more detailed information from within the container?
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesn’t work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
Fetching the remote mails and make them available in mailus mail-storage
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
</issue>
<code>
[start of optional/fetchmail/fetchmail.py]
1 #!/usr/bin/env python3
2
3 import time
4 import os
5 from pathlib import Path
6 from pwd import getpwnam
7 import tempfile
8 import shlex
9 import subprocess
10 import requests
11 from socrate import system
12 import sys
13 import traceback
14
15
16 FETCHMAIL = """
17 fetchmail -N \
18 --idfile /data/fetchids --uidl \
19 --pidfile /dev/shm/fetchmail.pid \
20 --sslcertck --sslcertpath /etc/ssl/certs \
21 -f {}
22 """
23
24
25 RC_LINE = """
26 poll "{host}" proto {protocol} port {port}
27 user "{username}" password "{password}"
28 is "{user_email}"
29 smtphost "{smtphost}"
30 {folders}
31 {options}
32 {lmtp}
33 """
34
35
36 def escape_rc_string(arg):
37 return "".join("\\x%2x" % ord(char) for char in arg)
38
39
40 def fetchmail(fetchmailrc):
41 with tempfile.NamedTemporaryFile() as handler:
42 handler.write(fetchmailrc.encode("utf8"))
43 handler.flush()
44 command = FETCHMAIL.format(shlex.quote(handler.name))
45 output = subprocess.check_output(command, shell=True)
46 return output
47
48
49 def run(debug):
50 try:
51 fetches = requests.get(f"http://{os.environ['ADMIN_ADDRESS']}:8080/internal/fetch").json()
52 for fetch in fetches:
53 fetchmailrc = ""
54 options = "options antispam 501, 504, 550, 553, 554"
55 options += " ssl" if fetch["tls"] else ""
56 options += " keep" if fetch["keep"] else " fetchall"
57 folders = "folders %s" % ((','.join('"' + item + '"' for item in fetch['folders'])) if fetch['folders'] else '"INBOX"')
58 fetchmailrc += RC_LINE.format(
59 user_email=escape_rc_string(fetch["user_email"]),
60 protocol=fetch["protocol"],
61 host=escape_rc_string(fetch["host"]),
62 port=fetch["port"],
63 smtphost=f'{os.environ["FRONT_ADDRESS"]}' if fetch['scan'] else f'{os.environ["FRONT_ADDRESS"]}/2525',
64 username=escape_rc_string(fetch["username"]),
65 password=escape_rc_string(fetch["password"]),
66 options=options,
67 folders=folders,
68 lmtp='' if fetch['scan'] else 'lmtp',
69 )
70 if debug:
71 print(fetchmailrc)
72 try:
73 print(fetchmail(fetchmailrc))
74 error_message = ""
75 except subprocess.CalledProcessError as error:
76 error_message = error.output.decode("utf8")
77 # No mail is not an error
78 if not error_message.startswith("fetchmail: No mail"):
79 print(error_message)
80 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
81 # Number of messages seen is not a error as well
82 if ("messages" in error_message and
83 "(seen " in error_message and
84 user_info in error_message):
85 print(error_message)
86 finally:
87 requests.post("http://{}:8080/internal/fetch/{}".format(os.environ['ADMIN_ADDRESS'],fetch['id']),
88 json=error_message.split('\n')[0]
89 )
90 except Exception:
91 traceback.print_exc()
92
93
94 if __name__ == "__main__":
95 id_fetchmail = getpwnam('fetchmail')
96 Path('/data/fetchids').touch()
97 os.chown("/data/fetchids", id_fetchmail.pw_uid, id_fetchmail.pw_gid)
98 os.chown("/data/", id_fetchmail.pw_uid, id_fetchmail.pw_gid)
99 os.chmod("/data/fetchids", 0o700)
100 system.drop_privs_to('fetchmail')
101 config = system.set_env()
102 while True:
103 delay = int(os.environ.get('FETCHMAIL_DELAY', 60))
104 print("Sleeping for {} seconds".format(delay))
105 time.sleep(delay)
106
107 if not config.get('FETCHMAIL_ENABLED', True):
108 print("Fetchmail disabled, skipping...")
109 continue
110
111 run(config.get('DEBUG', False))
112 sys.stdout.flush()
113
[end of optional/fetchmail/fetchmail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -64,7 +64,7 @@
username=escape_rc_string(fetch["username"]),
password=escape_rc_string(fetch["password"]),
options=options,
- folders=folders,
+ folders='' if fetch['protocol'] == 'pop3' else folders,
lmtp='' if fetch['scan'] else 'lmtp',
)
if debug:
|
{"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -64,7 +64,7 @@\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options,\n- folders=folders,\n+ folders='' if fetch['protocol'] == 'pop3' else folders,\n lmtp='' if fetch['scan'] else 'lmtp',\n )\n if debug:\n", "issue": "fetchmail does not fetch mails in 2.0\n<!--\r\n\r\nThank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.\r\nFor **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).\r\n\r\nTo be able to help you best, we need some more information.\r\n\r\nBefore you open your issue\r\n- Check if no issue or pull-request for this already exists.\r\n- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- You understand `Mailu` is made by volunteers in their **free time** \u2014 be concise, civil and accept that delays can occur.\r\n- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\nPlease put your text outside of the comment blocks to be visible. You can use the button \"Preview\" above to check.\r\n\r\n-->\r\n\r\n## Environment & Version\r\n\r\n### Environment\r\n\r\n- [X] docker compose\r\n- [ ] kubernetes\r\n- [ ] docker swarm\r\n\r\n### Version\r\n\r\n- Version: 2.0, Container-\"Id\": \"sha256:dd0a30f2ecb74491c49c8ab1b1d8d710eb55cc1e81d411acea9171ebd40ff314\",\r\n\r\n<!--\r\nTo find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).\r\n\r\n$> docker ps -a | grep mailu\r\n140b09d4b09c mailu/roundcube:1.7 \"docker-php-entrypoi\u2026\" 2 weeks ago Up 2 days (healthy) 80/tcp\r\n$> grep MAILU_VERSION docker-compose.yml mailu.env\r\n-->\r\n\r\n## Description\r\n<!--\r\nFurther explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.\r\n-->\r\nAfter updating from 1.9 to 2.0 fetchmails does not fetch mails from remote POP3s-Servers. The container-logs contain the following information with log-level WARNING. Log-level DEBUG does not print more useful information\r\n\r\n```\r\nmailu-20-fetchmail-1 | Option --folder is not supported with POP3\r\nmailu-20-fetchmail-1 | Option --folder is not supported with POP3\r\nmailu-20-fetchmail-1 | Option --folder is not supported with POP3\r\nmailu-20-fetchmail-1 | Sleeping for 1000 seconds\r\nmailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)\r\nmailu-20-fetchmail-1 |\r\nmailu-20-fetchmail-1 |\r\nmailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)\r\n```\r\nI copied the mailu.db and fired up the new stack based on the newly created configs and docker-compose.yml-files.\r\n\r\n## Replication Steps\r\n<!--\r\nSteps for replicating your issue\r\n-->\r\nmigrated DB from 1.9 copied to the new folder and starting the stack.\r\n\r\n## Observed behaviour\r\n<!--\r\nExplain or paste the result you received.\r\n-->\r\nAny chance to get more detailed information from within the container?\r\n\r\n## Expected behaviour\r\n<!--\r\nExplain what results you expected - be as specific as possible.\r\nJust saying \"it doesn\u2019t work as expected\" is not useful. 
It's also helpful to describe what you actually experienced.\r\n-->\r\nFetching the remote mails and make them available in mailus mail-storage\r\n\r\n## Logs\r\n<!--\r\nOften it is very useful to include log fragments of the involved component.\r\nYou can get the logs via `docker logs <container name> --tail 1000`.\r\nFor example for the admin container: `docker logs mailu_admin_1 --tail 1000`\r\nor using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`\r\n\r\nIf you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:\r\n\r\n```\r\nYour logs here!\r\n```\r\n-->\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport time\nimport os\nfrom pathlib import Path\nfrom pwd import getpwnam\nimport tempfile\nimport shlex\nimport subprocess\nimport requests\nfrom socrate import system\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --idfile /data/fetchids --uidl \\\n --pidfile /dev/shm/fetchmail.pid \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {folders}\n {options}\n {lmtp}\n\"\"\"\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(f\"http://{os.environ['ADMIN_ADDRESS']}:8080/internal/fetch\").json()\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n folders = \"folders %s\" % ((','.join('\"' + item + '\"' for item in fetch['folders'])) if fetch['folders'] else '\"INBOX\"')\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=f'{os.environ[\"FRONT_ADDRESS\"]}' if fetch['scan'] else f'{os.environ[\"FRONT_ADDRESS\"]}/2525',\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options,\n folders=folders,\n lmtp='' if fetch['scan'] else 'lmtp',\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://{}:8080/internal/fetch/{}\".format(os.environ['ADMIN_ADDRESS'],fetch['id']),\n json=error_message.split('\\n')[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n id_fetchmail = getpwnam('fetchmail')\n Path('/data/fetchids').touch()\n os.chown(\"/data/fetchids\", 
id_fetchmail.pw_uid, id_fetchmail.pw_gid)\n os.chown(\"/data/\", id_fetchmail.pw_uid, id_fetchmail.pw_gid)\n os.chmod(\"/data/fetchids\", 0o700)\n system.drop_privs_to('fetchmail')\n config = system.set_env()\n while True:\n delay = int(os.environ.get('FETCHMAIL_DELAY', 60))\n print(\"Sleeping for {} seconds\".format(delay))\n time.sleep(delay)\n\n if not config.get('FETCHMAIL_ENABLED', True):\n print(\"Fetchmail disabled, skipping...\")\n continue\n\n run(config.get('DEBUG', False))\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]}
| 2,643 | 127 |
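The Mailu fix above is about how the generated fetchmailrc is assembled: fetchmail's `folders` keyword is an IMAP concept, and the log line "Option --folder is not supported with POP3" is fetchmail rejecting it for POP3 polls. A small self-contained sketch of that branch follows; the `folders_clause` helper and the sample `fetch` dicts are invented for illustration and are not Mailu API.

```python
# Minimal sketch of the protocol-aware folders handling from the patch above.
def folders_clause(fetch):
    folders = "folders %s" % (
        ",".join('"%s"' % item for item in fetch["folders"]) if fetch["folders"] else '"INBOX"'
    )
    # fetchmail only understands folder selection for IMAP-style protocols,
    # so the clause has to be dropped entirely for POP3 polls.
    return "" if fetch["protocol"] == "pop3" else folders


print(folders_clause({"protocol": "pop3", "folders": []}))                 # ''
print(folders_clause({"protocol": "imap", "folders": ["INBOX", "Work"]}))  # folders "INBOX","Work"
```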
gh_patches_debug_27479
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1308
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aioredis instrumentation: AttributeError: 'str' object has no attribute 'decode'
**Describe the bug**
In some cases `aioredis` passes `str` commands to its `aioredis.Redis.execute()` method rather than `bytes` commands, e.g. when executing `aioredis.Redis.ping()`, which the `elastic-apm` instrumentation does not handle.
**Current workaround**
Disable `aioredis` instrumenting (set environment variable `SKIP_INSTRUMENT_AIOREDIS="true"`) prior `elasticapm.instrument()` is called.
**To Reproduce**
```python3
import asyncio
import aioredis
import elasticapm
REDIS_URL = "redis://localhost:6379"
async def main():
elasticapm.instrument()
client = await aioredis.create_redis_pool(REDIS_URL)
elastic_apm_client = elasticapm.Client()
elastic_apm_client.begin_transaction("redis-ping")
try:
assert await client.ping() == b"PONG"
finally:
client.close()
await client.wait_closed()
elastic_apm_client.end_transaction("redis-ping")
if __name__ == "__main__":
asyncio.run(main())
```
**Expected result**
No exception is raised.
**Actual result**
```python3
Traceback (most recent call last):
File "<...>/test.py", line 24, in <module>
asyncio.run(main())
File "/usr/local/lib/python3.8/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "<...>/test.py", line 17, in main
assert await client.ping() == b"PONG"
File "<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py", line 114, in ping
return self.execute('PING', *args, encoding=encoding)
File "<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py", line 51, in execute
return self._pool_or_conn.execute(command, *args, **kwargs)
File "<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/asyncio/aioredis.py", line 46, in call
wrapped_name = args[0].decode()
AttributeError: 'str' object has no attribute 'decode'
```
**Environment**
- OS: Ubuntu 18.04.5 LTS
- Python version: 3.8.9
- Framework and version: `aioredis==1.3.1`
- APM Server version: --
- Agent version: `elasticapm==6.3.3`
</issue>
<code>
[start of elasticapm/instrumentation/packages/asyncio/aioredis.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 from elasticapm.contrib.asyncio.traces import async_capture_span
34 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
35 from elasticapm.traces import execution_context
36
37
38 class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):
39 name = "aioredis"
40
41 instrument_list = [("aioredis.pool", "ConnectionsPool.execute"),
42 ("aioredis.pool", "ConnectionsPool.execute_pubsub")]
43
44 def call(self, module, method, wrapped, instance, args, kwargs):
45 if len(args) > 0:
46 wrapped_name = args[0].decode()
47 else:
48 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
49
50 with async_capture_span(
51 wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True
52 ) as span:
53 span.context["destination"] = _get_destination_info(instance)
54
55 return wrapped(*args, **kwargs)
56
57
58 class RedisPipelineInstrumentation(AbstractInstrumentedModule):
59 name = "aioredis"
60
61 instrument_list = [("aioredis.commands.transaction", "Pipeline.execute")]
62
63 def call(self, module, method, wrapped, instance, args, kwargs):
64 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
65
66 with async_capture_span(
67 wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True
68 ) as span:
69 span.context["destination"] = _get_destination_info(instance)
70
71 return wrapped(*args, **kwargs)
72
73
74 class RedisConnectionInstrumentation(AbstractInstrumentedModule):
75 name = "aioredis"
76
77 instrument_list = (("aioredis.connection", "RedisConnection.execute"),
78 ("aioredis.pool", "ConnectionsPool.execute_pubsub"))
79
80 def call(self, module, method, wrapped, instance, args, kwargs):
81 span = execution_context.get_span()
82 if span and span.subtype == "aioredis":
83 span.context["destination"] = _get_destination_info(instance)
84 return wrapped(*args, **kwargs)
85
86
87 def _get_destination_info(connection):
88 destination_info = {"service": {"name": "aioredis", "resource": "redis", "type": "db"}}
89
90 if hasattr(connection, "_pool_or_conn"):
91 destination_info["port"] = connection._pool_or_conn.address[1]
92 destination_info["address"] = connection._pool_or_conn.address[0]
93 else:
94 destination_info["port"] = connection.address[1]
95 destination_info["address"] = connection.address[0]
96
97 return destination_info
98
[end of elasticapm/instrumentation/packages/asyncio/aioredis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/instrumentation/packages/asyncio/aioredis.py b/elasticapm/instrumentation/packages/asyncio/aioredis.py
--- a/elasticapm/instrumentation/packages/asyncio/aioredis.py
+++ b/elasticapm/instrumentation/packages/asyncio/aioredis.py
@@ -38,12 +38,16 @@
class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):
name = "aioredis"
- instrument_list = [("aioredis.pool", "ConnectionsPool.execute"),
- ("aioredis.pool", "ConnectionsPool.execute_pubsub")]
+ instrument_list = [
+ ("aioredis.pool", "ConnectionsPool.execute"),
+ ("aioredis.pool", "ConnectionsPool.execute_pubsub"),
+ ]
def call(self, module, method, wrapped, instance, args, kwargs):
if len(args) > 0:
- wrapped_name = args[0].decode()
+ wrapped_name = args[0]
+ if isinstance(wrapped_name, bytes):
+ wrapped_name = wrapped_name.decode()
else:
wrapped_name = self.get_wrapped_name(wrapped, instance, method)
@@ -74,8 +78,10 @@
class RedisConnectionInstrumentation(AbstractInstrumentedModule):
name = "aioredis"
- instrument_list = (("aioredis.connection", "RedisConnection.execute"),
- ("aioredis.pool", "ConnectionsPool.execute_pubsub"))
+ instrument_list = (
+ ("aioredis.connection", "RedisConnection.execute"),
+ ("aioredis.pool", "ConnectionsPool.execute_pubsub"),
+ )
def call(self, module, method, wrapped, instance, args, kwargs):
span = execution_context.get_span()
|
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/aioredis.py b/elasticapm/instrumentation/packages/asyncio/aioredis.py\n--- a/elasticapm/instrumentation/packages/asyncio/aioredis.py\n+++ b/elasticapm/instrumentation/packages/asyncio/aioredis.py\n@@ -38,12 +38,16 @@\n class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n \n- instrument_list = [(\"aioredis.pool\", \"ConnectionsPool.execute\"),\n- (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\")]\n+ instrument_list = [\n+ (\"aioredis.pool\", \"ConnectionsPool.execute\"),\n+ (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n+ ]\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n- wrapped_name = args[0].decode()\n+ wrapped_name = args[0]\n+ if isinstance(wrapped_name, bytes):\n+ wrapped_name = wrapped_name.decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n \n@@ -74,8 +78,10 @@\n class RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n \n- instrument_list = ((\"aioredis.connection\", \"RedisConnection.execute\"),\n- (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"))\n+ instrument_list = (\n+ (\"aioredis.connection\", \"RedisConnection.execute\"),\n+ (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n+ )\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n", "issue": "aioredis instrumentation: AttributeError: 'str' object has no attribute 'decode'\n**Describe the bug**\r\nIn some cases `aioredis` passes `str` commands to its `aioredis.Redis.execute()` method rather than `bytes` commands, like e.g. when executing `aioredis.Redis.ping()`, which is unacceptable by `elastic-apm`. 
\r\n\r\n**Current workaround**\r\nDisable `aioredis` instrumenting (set environment variable `SKIP_INSTRUMENT_AIOREDIS=\"true\"`) prior `elasticapm.instrument()` is called.\r\n\r\n**To Reproduce**\r\n```python3\r\nimport asyncio\r\n\r\nimport aioredis\r\nimport elasticapm\r\n\r\nREDIS_URL = \"redis://localhost:6379\"\r\n\r\n\r\nasync def main():\r\n elasticapm.instrument()\r\n\r\n client = await aioredis.create_redis_pool(REDIS_URL)\r\n elastic_apm_client = elasticapm.Client()\r\n\r\n elastic_apm_client.begin_transaction(\"redis-ping\")\r\n try:\r\n assert await client.ping() == b\"PONG\"\r\n finally:\r\n client.close()\r\n await client.wait_closed()\r\n elastic_apm_client.end_transaction(\"redis-ping\")\r\n\r\nif __name__ == \"__main__\":\r\n asyncio.run(main())\r\n```\r\n\r\n**Expected result**\r\nNo exception is raised.\r\n\r\n**Actual result**\r\n```python3\r\nTraceback (most recent call last):\r\n File \"<...>/test.py\", line 24, in <module>\r\n asyncio.run(main())\r\n File \"/usr/local/lib/python3.8/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"/usr/local/lib/python3.8/asyncio/base_events.py\", line 616, in run_until_complete\r\n return future.result()\r\n File \"<...>/test.py\", line 17, in main\r\n assert await client.ping() == b\"PONG\"\r\n File \"<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py\", line 114, in ping\r\n return self.execute('PING', *args, encoding=encoding)\r\n File \"<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py\", line 51, in execute\r\n return self._pool_or_conn.execute(command, *args, **kwargs)\r\n File \"<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/asyncio/aioredis.py\", line 46, in call\r\n wrapped_name = args[0].decode()\r\nAttributeError: 'str' object has no attribute 'decode'\r\n```\r\n\r\n**Environment**\r\n- OS: Ubuntu 18.04.5 LTS\r\n- Python version: 3.8.9\r\n- Framework and version: `aioredis==1.3.1`\r\n- APM Server version: --\r\n- Agent version: `elasticapm==6.3.3`\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import execution_context\n\n\nclass RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.pool\", \"ConnectionsPool.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = args[0].decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.commands.transaction\", \"Pipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = ((\"aioredis.connection\", \"RedisConnection.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"))\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"aioredis\":\n span.context[\"destination\"] = _get_destination_info(instance)\n return wrapped(*args, **kwargs)\n\n\ndef _get_destination_info(connection):\n destination_info = {\"service\": {\"name\": \"aioredis\", \"resource\": \"redis\", \"type\": \"db\"}}\n\n if hasattr(connection, \"_pool_or_conn\"):\n destination_info[\"port\"] = connection._pool_or_conn.address[1]\n destination_info[\"address\"] = connection._pool_or_conn.address[0]\n else:\n destination_info[\"port\"] = connection.address[1]\n destination_info[\"address\"] = connection.address[0]\n\n return destination_info\n", "path": "elasticapm/instrumentation/packages/asyncio/aioredis.py"}]}
| 2,316 | 390 |
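The aioredis record above comes down to one defensive detail: the first argument reaching `ConnectionsPool.execute` may be `bytes` (raw protocol commands) or `str` (convenience wrappers such as `ping()` pass `'PING'`), so the span name should only be decoded when it really is bytes. A standalone sketch of that normalization follows; the helper name is hypothetical and not part of elastic-apm's API.

```python
# Illustrative helper mirroring the guarded decode applied in the patch above.
def span_name_from_command(raw) -> str:
    # aioredis hands over bytes for raw commands but plain str for helpers
    # like ping(), so only decode when decoding is actually possible.
    if isinstance(raw, bytes):
        return raw.decode()
    return str(raw)


assert span_name_from_command(b"GET") == "GET"
assert span_name_from_command("PING") == "PING"
```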
gh_patches_debug_25118
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-375
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User profile failing to load
**Describe the bug**
Attempting to access https://bookwyrm.social/user/jonathan.brodsky causes a
```
Server Error
Something went wrong! Sorry about that.
```
**To Reproduce**
Go to https://bookwyrm.social/user/jonathan.brodsky
It appears to apply to all urls within user/jonathan.brodsky - see https://bookwyrm.social/user/jonathan.brodsky/generatednote/1994
**Expected behavior**
A user profile page loads.
**Desktop (please complete the following information):**
- OS: windows
- Browser Chrome
- Version 87.0.4280.66
</issue>
<code>
[start of bookwyrm/urls.py]
1 ''' url routing for the app and api '''
2 from django.conf.urls.static import static
3 from django.contrib import admin
4 from django.urls import path, re_path
5
6 from bookwyrm import incoming, outgoing, views, settings, wellknown
7 from bookwyrm import view_actions as actions
8
9 username_regex = r'(?P<username>[\w\-_]+@[\w\-\_\.]+)'
10 localname_regex = r'(?P<username>[\w\-_]+)'
11 user_path = r'^user/%s' % username_regex
12 local_user_path = r'^user/%s' % localname_regex
13
14 status_types = [
15 'status',
16 'review',
17 'comment',
18 'quotation',
19 'boost',
20 'generatednote'
21 ]
22 status_path = r'%s/(%s)/(?P<status_id>\d+)' % \
23 (local_user_path, '|'.join(status_types))
24
25 book_path = r'^book/(?P<book_id>\d+)'
26
27 handler404 = 'bookwyrm.views.not_found_page'
28 handler500 = 'bookwyrm.views.server_error_page'
29 urlpatterns = [
30 path('admin/', admin.site.urls),
31
32 # federation endpoints
33 re_path(r'^inbox/?$', incoming.shared_inbox),
34 re_path(r'%s/inbox/?$' % local_user_path, incoming.inbox),
35 re_path(r'%s/outbox/?$' % local_user_path, outgoing.outbox),
36
37 # .well-known endpoints
38 re_path(r'^.well-known/webfinger/?$', wellknown.webfinger),
39 re_path(r'^.well-known/nodeinfo/?$', wellknown.nodeinfo_pointer),
40 re_path(r'^nodeinfo/2\.0/?$', wellknown.nodeinfo),
41 re_path(r'^api/v1/instance/?$', wellknown.instance_info),
42 re_path(r'^api/v1/instance/peers/?$', wellknown.peers),
43 # TODO: re_path(r'^.well-known/host-meta/?$', incoming.host_meta),
44 # TODO: robots.txt
45
46 # ui views
47 re_path(r'^login/?$', views.login_page),
48 re_path(r'^about/?$', views.about_page),
49 re_path(r'^password-reset/?$', views.password_reset_request),
50 re_path(r'^password-reset/(?P<code>[A-Za-z0-9]+)/?$', views.password_reset),
51 re_path(r'^invite/?$', views.manage_invites),
52 re_path(r'^invite/(?P<code>[A-Za-z0-9]+)/?$', views.invite_page),
53
54 path('', views.home),
55 re_path(r'^(?P<tab>home|local|federated)/?$', views.home_tab),
56 re_path(r'^notifications/?', views.notifications_page),
57 re_path(r'^import/?$', views.import_page),
58 re_path(r'^import-status/(\d+)/?$', views.import_status),
59 re_path(r'^user-edit/?$', views.edit_profile_page),
60
61 # should return a ui view or activitypub json blob as requested
62 # users
63 re_path(r'%s/?$' % user_path, views.user_page),
64 re_path(r'%s/?$' % local_user_path, views.user_page),
65 re_path(r'%s\.json$' % local_user_path, views.user_page),
66 re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page),
67 re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page),
68 re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),
69
70 # statuses
71 re_path(r'%s(.json)?/?$' % status_path, views.status_page),
72 re_path(r'%s/activity/?$' % status_path, views.status_page),
73 re_path(r'%s/replies(.json)?/?$' % status_path, views.replies_page),
74
75 # books
76 re_path(r'%s(.json)?/?$' % book_path, views.book_page),
77 re_path(r'%s/edit/?$' % book_path, views.edit_book_page),
78 re_path(r'%s/editions(.json)?/?$' % book_path, views.editions_page),
79
80 re_path(r'^author/(?P<author_id>[\w\-]+)(.json)?/?$', views.author_page),
81 re_path(r'^tag/(?P<tag_id>.+)\.json/?$', views.tag_page),
82 re_path(r'^tag/(?P<tag_id>.+)/?$', views.tag_page),
83 re_path(r'^%s/shelf/(?P<shelf_identifier>[\w-]+)(.json)?/?$' % \
84 user_path, views.shelf_page),
85 re_path(r'^%s/shelf/(?P<shelf_identifier>[\w-]+)(.json)?/?$' % \
86 local_user_path, views.shelf_page),
87
88 re_path(r'^search/?$', views.search),
89
90 # internal action endpoints
91 re_path(r'^logout/?$', actions.user_logout),
92 re_path(r'^user-login/?$', actions.user_login),
93 re_path(r'^user-register/?$', actions.register),
94 re_path(r'^reset-password-request/?$', actions.password_reset_request),
95 re_path(r'^reset-password/?$', actions.password_reset),
96 re_path(r'^change-password/?$', actions.password_change),
97
98 re_path(r'^edit-profile/?$', actions.edit_profile),
99
100 re_path(r'^import-data/?', actions.import_data),
101 re_path(r'^retry-import/?', actions.retry_import),
102 re_path(r'^resolve-book/?', actions.resolve_book),
103 re_path(r'^edit-book/(?P<book_id>\d+)/?', actions.edit_book),
104 re_path(r'^upload-cover/(?P<book_id>\d+)/?', actions.upload_cover),
105 re_path(r'^add-description/(?P<book_id>\d+)/?', actions.add_description),
106
107 re_path(r'^edit-readthrough/?', actions.edit_readthrough),
108 re_path(r'^delete-readthrough/?', actions.delete_readthrough),
109
110 re_path(r'^rate/?$', actions.rate),
111 re_path(r'^review/?$', actions.review),
112 re_path(r'^quote/?$', actions.quotate),
113 re_path(r'^comment/?$', actions.comment),
114 re_path(r'^tag/?$', actions.tag),
115 re_path(r'^untag/?$', actions.untag),
116 re_path(r'^reply/?$', actions.reply),
117
118 re_path(r'^favorite/(?P<status_id>\d+)/?$', actions.favorite),
119 re_path(r'^unfavorite/(?P<status_id>\d+)/?$', actions.unfavorite),
120 re_path(r'^boost/(?P<status_id>\d+)/?$', actions.boost),
121 re_path(r'^unboost/(?P<status_id>\d+)/?$', actions.unboost),
122
123 re_path(r'^delete-status/(?P<status_id>\d+)/?$', actions.delete_status),
124
125 re_path(r'^create-shelf/?$', actions.create_shelf),
126 re_path(r'^edit-shelf/(?P<shelf_id>\d+)?$', actions.edit_shelf),
127 re_path(r'^delete-shelf/(?P<shelf_id>\d+)?$', actions.delete_shelf),
128 re_path(r'^shelve/?$', actions.shelve),
129 re_path(r'^unshelve/?$', actions.unshelve),
130 re_path(r'^start-reading/(?P<book_id>\d+)/?$', actions.start_reading),
131 re_path(r'^finish-reading/(?P<book_id>\d+)/?$', actions.finish_reading),
132
133 re_path(r'^follow/?$', actions.follow),
134 re_path(r'^unfollow/?$', actions.unfollow),
135 re_path(r'^accept-follow-request/?$', actions.accept_follow_request),
136 re_path(r'^delete-follow-request/?$', actions.delete_follow_request),
137
138 re_path(r'^clear-notifications/?$', actions.clear_notifications),
139
140 re_path(r'^create-invite/?$', actions.create_invite),
141
142 ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
143
[end of bookwyrm/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/urls.py b/bookwyrm/urls.py
--- a/bookwyrm/urls.py
+++ b/bookwyrm/urls.py
@@ -6,8 +6,8 @@
from bookwyrm import incoming, outgoing, views, settings, wellknown
from bookwyrm import view_actions as actions
-username_regex = r'(?P<username>[\w\-_]+@[\w\-\_\.]+)'
-localname_regex = r'(?P<username>[\w\-_]+)'
+username_regex = r'(?P<username>[\w\-_\.]+@[\w\-\_\.]+)'
+localname_regex = r'(?P<username>[\w\-_\.]+)'
user_path = r'^user/%s' % username_regex
local_user_path = r'^user/%s' % localname_regex
@@ -61,8 +61,8 @@
# should return a ui view or activitypub json blob as requested
# users
re_path(r'%s/?$' % user_path, views.user_page),
- re_path(r'%s/?$' % local_user_path, views.user_page),
re_path(r'%s\.json$' % local_user_path, views.user_page),
+ re_path(r'%s/?$' % local_user_path, views.user_page),
re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page),
re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page),
re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),
|
{"golden_diff": "diff --git a/bookwyrm/urls.py b/bookwyrm/urls.py\n--- a/bookwyrm/urls.py\n+++ b/bookwyrm/urls.py\n@@ -6,8 +6,8 @@\n from bookwyrm import incoming, outgoing, views, settings, wellknown\n from bookwyrm import view_actions as actions\n \n-username_regex = r'(?P<username>[\\w\\-_]+@[\\w\\-\\_\\.]+)'\n-localname_regex = r'(?P<username>[\\w\\-_]+)'\n+username_regex = r'(?P<username>[\\w\\-_\\.]+@[\\w\\-\\_\\.]+)'\n+localname_regex = r'(?P<username>[\\w\\-_\\.]+)'\n user_path = r'^user/%s' % username_regex\n local_user_path = r'^user/%s' % localname_regex\n \n@@ -61,8 +61,8 @@\n # should return a ui view or activitypub json blob as requested\n # users\n re_path(r'%s/?$' % user_path, views.user_page),\n- re_path(r'%s/?$' % local_user_path, views.user_page),\n re_path(r'%s\\.json$' % local_user_path, views.user_page),\n+ re_path(r'%s/?$' % local_user_path, views.user_page),\n re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page),\n re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page),\n re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),\n", "issue": "User profile failing to load\n**Describe the bug**\r\nAttempting to access https://bookwyrm.social/user/jonathan.brodsky causes a\r\n```\r\nServer Error\r\nSomething went wrong! Sorry about that.\r\n```\r\n\r\n**To Reproduce**\r\nGo to https://bookwyrm.social/user/jonathan.brodsky\r\nIt appears to apply to all urls within user/jonathan.brodsky - see https://bookwyrm.social/user/jonathan.brodsky/generatednote/1994\r\n\r\n**Expected behavior**\r\nA user profile page loads.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows\r\n - Browser Chrome\r\n - Version 87.0.4280.66\n", "before_files": [{"content": "''' url routing for the app and api '''\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, re_path\n\nfrom bookwyrm import incoming, outgoing, views, settings, wellknown\nfrom bookwyrm import view_actions as actions\n\nusername_regex = r'(?P<username>[\\w\\-_]+@[\\w\\-\\_\\.]+)'\nlocalname_regex = r'(?P<username>[\\w\\-_]+)'\nuser_path = r'^user/%s' % username_regex\nlocal_user_path = r'^user/%s' % localname_regex\n\nstatus_types = [\n 'status',\n 'review',\n 'comment',\n 'quotation',\n 'boost',\n 'generatednote'\n]\nstatus_path = r'%s/(%s)/(?P<status_id>\\d+)' % \\\n (local_user_path, '|'.join(status_types))\n\nbook_path = r'^book/(?P<book_id>\\d+)'\n\nhandler404 = 'bookwyrm.views.not_found_page'\nhandler500 = 'bookwyrm.views.server_error_page'\nurlpatterns = [\n path('admin/', admin.site.urls),\n\n # federation endpoints\n re_path(r'^inbox/?$', incoming.shared_inbox),\n re_path(r'%s/inbox/?$' % local_user_path, incoming.inbox),\n re_path(r'%s/outbox/?$' % local_user_path, outgoing.outbox),\n\n # .well-known endpoints\n re_path(r'^.well-known/webfinger/?$', wellknown.webfinger),\n re_path(r'^.well-known/nodeinfo/?$', wellknown.nodeinfo_pointer),\n re_path(r'^nodeinfo/2\\.0/?$', wellknown.nodeinfo),\n re_path(r'^api/v1/instance/?$', wellknown.instance_info),\n re_path(r'^api/v1/instance/peers/?$', wellknown.peers),\n # TODO: re_path(r'^.well-known/host-meta/?$', incoming.host_meta),\n # TODO: robots.txt\n\n # ui views\n re_path(r'^login/?$', views.login_page),\n re_path(r'^about/?$', views.about_page),\n re_path(r'^password-reset/?$', views.password_reset_request),\n re_path(r'^password-reset/(?P<code>[A-Za-z0-9]+)/?$', views.password_reset),\n re_path(r'^invite/?$', 
views.manage_invites),\n re_path(r'^invite/(?P<code>[A-Za-z0-9]+)/?$', views.invite_page),\n\n path('', views.home),\n re_path(r'^(?P<tab>home|local|federated)/?$', views.home_tab),\n re_path(r'^notifications/?', views.notifications_page),\n re_path(r'^import/?$', views.import_page),\n re_path(r'^import-status/(\\d+)/?$', views.import_status),\n re_path(r'^user-edit/?$', views.edit_profile_page),\n\n # should return a ui view or activitypub json blob as requested\n # users\n re_path(r'%s/?$' % user_path, views.user_page),\n re_path(r'%s/?$' % local_user_path, views.user_page),\n re_path(r'%s\\.json$' % local_user_path, views.user_page),\n re_path(r'%s/shelves/?$' % local_user_path, views.user_shelves_page),\n re_path(r'%s/followers(.json)?/?$' % local_user_path, views.followers_page),\n re_path(r'%s/following(.json)?/?$' % local_user_path, views.following_page),\n\n # statuses\n re_path(r'%s(.json)?/?$' % status_path, views.status_page),\n re_path(r'%s/activity/?$' % status_path, views.status_page),\n re_path(r'%s/replies(.json)?/?$' % status_path, views.replies_page),\n\n # books\n re_path(r'%s(.json)?/?$' % book_path, views.book_page),\n re_path(r'%s/edit/?$' % book_path, views.edit_book_page),\n re_path(r'%s/editions(.json)?/?$' % book_path, views.editions_page),\n\n re_path(r'^author/(?P<author_id>[\\w\\-]+)(.json)?/?$', views.author_page),\n re_path(r'^tag/(?P<tag_id>.+)\\.json/?$', views.tag_page),\n re_path(r'^tag/(?P<tag_id>.+)/?$', views.tag_page),\n re_path(r'^%s/shelf/(?P<shelf_identifier>[\\w-]+)(.json)?/?$' % \\\n user_path, views.shelf_page),\n re_path(r'^%s/shelf/(?P<shelf_identifier>[\\w-]+)(.json)?/?$' % \\\n local_user_path, views.shelf_page),\n\n re_path(r'^search/?$', views.search),\n\n # internal action endpoints\n re_path(r'^logout/?$', actions.user_logout),\n re_path(r'^user-login/?$', actions.user_login),\n re_path(r'^user-register/?$', actions.register),\n re_path(r'^reset-password-request/?$', actions.password_reset_request),\n re_path(r'^reset-password/?$', actions.password_reset),\n re_path(r'^change-password/?$', actions.password_change),\n\n re_path(r'^edit-profile/?$', actions.edit_profile),\n\n re_path(r'^import-data/?', actions.import_data),\n re_path(r'^retry-import/?', actions.retry_import),\n re_path(r'^resolve-book/?', actions.resolve_book),\n re_path(r'^edit-book/(?P<book_id>\\d+)/?', actions.edit_book),\n re_path(r'^upload-cover/(?P<book_id>\\d+)/?', actions.upload_cover),\n re_path(r'^add-description/(?P<book_id>\\d+)/?', actions.add_description),\n\n re_path(r'^edit-readthrough/?', actions.edit_readthrough),\n re_path(r'^delete-readthrough/?', actions.delete_readthrough),\n\n re_path(r'^rate/?$', actions.rate),\n re_path(r'^review/?$', actions.review),\n re_path(r'^quote/?$', actions.quotate),\n re_path(r'^comment/?$', actions.comment),\n re_path(r'^tag/?$', actions.tag),\n re_path(r'^untag/?$', actions.untag),\n re_path(r'^reply/?$', actions.reply),\n\n re_path(r'^favorite/(?P<status_id>\\d+)/?$', actions.favorite),\n re_path(r'^unfavorite/(?P<status_id>\\d+)/?$', actions.unfavorite),\n re_path(r'^boost/(?P<status_id>\\d+)/?$', actions.boost),\n re_path(r'^unboost/(?P<status_id>\\d+)/?$', actions.unboost),\n\n re_path(r'^delete-status/(?P<status_id>\\d+)/?$', actions.delete_status),\n\n re_path(r'^create-shelf/?$', actions.create_shelf),\n re_path(r'^edit-shelf/(?P<shelf_id>\\d+)?$', actions.edit_shelf),\n re_path(r'^delete-shelf/(?P<shelf_id>\\d+)?$', actions.delete_shelf),\n re_path(r'^shelve/?$', actions.shelve),\n re_path(r'^unshelve/?$', 
actions.unshelve),\n re_path(r'^start-reading/(?P<book_id>\\d+)/?$', actions.start_reading),\n re_path(r'^finish-reading/(?P<book_id>\\d+)/?$', actions.finish_reading),\n\n re_path(r'^follow/?$', actions.follow),\n re_path(r'^unfollow/?$', actions.unfollow),\n re_path(r'^accept-follow-request/?$', actions.accept_follow_request),\n re_path(r'^delete-follow-request/?$', actions.delete_follow_request),\n\n re_path(r'^clear-notifications/?$', actions.clear_notifications),\n\n re_path(r'^create-invite/?$', actions.create_invite),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "bookwyrm/urls.py"}]}
| 2,716 | 364 |
gh_patches_debug_31884
|
rasdani/github-patches
|
git_diff
|
apluslms__a-plus-560
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A+ front page
The A+ front page should display the name of the course instance in addition to the course name and code. Multiple instances of the same course are hard to separate on the A+ front page because the only differing parts are the open/close dates and the image. Course instances have names, so the name should be displayed on the front page too. Jaakko has some ideas; contact him!
</issue>
<code>
[start of course/views.py]
1 import datetime
2
3 import icalendar
4 from django.conf import settings
5 from django.contrib import messages
6 from django.core.exceptions import PermissionDenied
7 from django.http import Http404
8 from django.http.response import HttpResponse
9 from django.shortcuts import get_object_or_404, redirect
10 from django.utils import html
11 from django.utils import timezone
12 from django.utils.translation import ugettext_lazy as _
13
14 from authorization.permissions import ACCESS
15 from exercise.cache.hierarchy import NoSuchContent
16 from exercise.models import LearningObject
17 from lib.helpers import settings_text
18 from lib.viewbase import BaseTemplateView, BaseRedirectMixin, BaseFormView, BaseView
19 from userprofile.viewbase import UserProfileView
20 from .forms import GroupsForm, GroupSelectForm
21 from .models import CourseInstance, Enrollment
22 from .permissions import EnrollInfoVisiblePermission
23 from .renders import group_info_context
24 from .viewbase import CourseModuleBaseView, CourseInstanceMixin, EnrollableViewMixin
25
26
27 class HomeView(UserProfileView):
28 access_mode = ACCESS.ANONYMOUS
29 template_name = "course/index.html"
30
31 def get_common_objects(self):
32 super().get_common_objects()
33 self.welcome_text = settings_text('WELCOME_TEXT')
34 self.internal_user_label = settings_text('INTERNAL_USER_LABEL')
35 self.external_user_label = settings_text('EXTERNAL_USER_LABEL')
36 self.instances = []
37 prio2 = []
38 treshold = timezone.now() - datetime.timedelta(days=10)
39 for instance in CourseInstance.objects.get_visible(self.request.user)\
40 .filter(ending_time__gte=timezone.now()):
41 if instance.starting_time > treshold:
42 self.instances += [instance]
43 else:
44 prio2 += [instance]
45 self.instances += prio2
46 self.note("welcome_text", "internal_user_label", "external_user_label", "instances")
47
48
49 class ArchiveView(UserProfileView):
50 access_mode = ACCESS.ANONYMOUS
51 template_name = "course/archive.html"
52
53 def get_common_objects(self):
54 super().get_common_objects()
55 self.instances = CourseInstance.objects.get_visible(self.request.user)
56 self.note("instances")
57
58 class InstanceView(EnrollableViewMixin, BaseTemplateView):
59 access_mode = ACCESS.STUDENT
60 # ACCESS.STUDENT requires users to log in, but the access mode is dropped
61 # in public courses. CourseVisiblePermission has more restrictions as well.
62 template_name = "course/course.html"
63
64 def handle_no_permission(self):
65 if self.request.user.is_authenticated \
66 and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED:
67 # The course instance is visible to only enrolled students, so
68 # redirect the user to the enroll page instead of showing
69 # a 403 Forbidden error.
70 return redirect(self.instance.get_url('enroll'))
71 return super().handle_no_permission()
72
73 def get(self, request, *args, **kwargs):
74 # external LTI Tool Providers may return the user to the course instance view
75 # with a message given in GET query parameters
76 lti_error_msg = request.GET.get('lti_errormsg')
77 lti_msg = request.GET.get('lti_msg')
78 # message HTML is not escaped in the templates so escape it here
79 if lti_error_msg:
80 messages.error(request, html.escape(lti_error_msg))
81 elif lti_msg:
82 messages.info(request, html.escape(lti_msg))
83
84 return super().get(request, *args, **kwargs)
85
86
87 class Enroll(EnrollableViewMixin, BaseRedirectMixin, BaseTemplateView):
88 permission_classes = [EnrollInfoVisiblePermission]
89 course_permission_classes = []
90 template_name = "course/enroll.html"
91
92 def post(self, request, *args, **kwargs):
93
94 if self.is_student or not self.enrollable:
95 messages.error(self.request, _("You cannot enroll, or have already enrolled, in this course."))
96 raise PermissionDenied()
97
98 if not self.instance.is_enrollment_open():
99 messages.error(self.request, _("The enrollment is not open."))
100 raise PermissionDenied()
101
102 # Support enrollment questionnaires.
103 exercise = LearningObject.objects.find_enrollment_exercise(
104 self.instance, self.profile)
105 if exercise:
106 return self.redirect(exercise.get_absolute_url())
107
108 self.instance.enroll_student(self.request.user)
109 return self.redirect(self.instance.get_absolute_url())
110
111
112 class ModuleView(CourseModuleBaseView):
113 template_name = "course/module.html"
114
115 def get_common_objects(self):
116 super().get_common_objects()
117 self.now = timezone.now()
118 try:
119 self.children = self.content.flat_module(self.module)
120 cur, tree, prev, nex = self.content.find(self.module)
121 self.previous = prev
122 self.current = cur
123 self.next = nex
124 except NoSuchContent:
125 raise Http404
126 self.note('now', 'children', 'previous', 'current', 'next')
127
128
129 class CalendarExport(CourseInstanceMixin, BaseView):
130
131 def get(self, request, *args, **kwargs):
132 cal = icalendar.Calendar()
133 cal.add('prodid', '-// {} calendar //'.format(settings.BRAND_NAME))
134 cal.add('version', '2.0')
135 for module in self.instance.course_modules.all():
136 event = icalendar.Event()
137 event.add('summary', module.name)
138 event.add('dtstart',
139 module.closing_time - datetime.timedelta(hours=1))
140 event.add('dtend', module.closing_time)
141 event.add('dtstamp', module.closing_time)
142 event['uid'] = "module/" + str(module.id) + "/A+"
143 cal.add_component(event)
144
145 return HttpResponse(cal.to_ical(),
146 content_type="text/calendar; charset=utf-8")
147
148
149 class GroupsView(CourseInstanceMixin, BaseFormView):
150 access_mode = ACCESS.ENROLLED
151 template_name = "course/groups.html"
152 form_class = GroupsForm
153
154 def get_common_objects(self):
155 super().get_common_objects()
156 self.enrollment = self.instance.get_enrollment_for(self.request.user)
157 self.groups = list(self.profile.groups.filter(course_instance=self.instance))
158 self.note('enrollment','groups')
159
160 def get_form_kwargs(self):
161 kwargs = super().get_form_kwargs()
162 kwargs["profile"] = self.profile
163 kwargs["instance"] = self.instance
164 kwargs["content"] = self.content
165 return kwargs
166
167 def get_success_url(self):
168 return self.instance.get_url('groups')
169
170 def form_valid(self, form):
171 form.save()
172 messages.success(self.request, _("A new student group was created."))
173 return super().form_valid(form)
174
175
176 class GroupSelect(CourseInstanceMixin, BaseFormView):
177 access_mode = ACCESS.ENROLLED
178 form_class = GroupSelectForm
179 template_name = "course/_group_info.html"
180
181 def get_form_kwargs(self):
182 kwargs = super().get_form_kwargs()
183 kwargs["profile"] = self.profile
184 kwargs["instance"] = self.instance
185 return kwargs
186
187 def get_success_url(self):
188 return self.instance.get_absolute_url()
189
190 def get(self, request, *args, **kwargs):
191 return self.http_method_not_allowed(request, *args, **kwargs)
192
193 def form_invalid(self, form):
194 return HttpResponse('Invalid group selection')
195
196 def form_valid(self, form):
197 enrollment = form.save()
198 if self.request.is_ajax():
199 return self.render_to_response(self.get_context_data(
200 **group_info_context(enrollment.selected_group, self.profile)))
201 return super().form_valid(form)
202
[end of course/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/course/views.py b/course/views.py
--- a/course/views.py
+++ b/course/views.py
@@ -33,17 +33,42 @@
self.welcome_text = settings_text('WELCOME_TEXT')
self.internal_user_label = settings_text('INTERNAL_USER_LABEL')
self.external_user_label = settings_text('EXTERNAL_USER_LABEL')
- self.instances = []
- prio2 = []
- treshold = timezone.now() - datetime.timedelta(days=10)
- for instance in CourseInstance.objects.get_visible(self.request.user)\
- .filter(ending_time__gte=timezone.now()):
- if instance.starting_time > treshold:
- self.instances += [instance]
- else:
- prio2 += [instance]
- self.instances += prio2
- self.note("welcome_text", "internal_user_label", "external_user_label", "instances")
+ my_instances = []
+ all_instances = []
+ end_threshold = timezone.now() - datetime.timedelta(days=30)
+ user = self.request.user
+ is_logged_in = False
+
+ if user and user.is_authenticated:
+ is_logged_in = True
+ for instance in (CourseInstance.objects
+ .filter(course__teachers=user.userprofile,
+ ending_time__gte=end_threshold)
+ .all()):
+ my_instances.append(instance)
+
+ for instance in user.userprofile.assisting_courses.all().filter(ending_time__gte=end_threshold):
+ if instance not in my_instances:
+ my_instances.append(instance)
+
+ for instance in user.userprofile.enrolled.all().filter(ending_time__gte=end_threshold):
+ if instance not in my_instances:
+ my_instances.append(instance)
+
+ all_instances = CourseInstance.objects.get_visible(user).filter(ending_time__gte=end_threshold)
+ all_instances = [c for c in all_instances if c not in my_instances]
+
+ self.all_instances = all_instances
+ self.my_instances = my_instances
+ self.is_logged_in = is_logged_in
+
+ self.note("welcome_text",
+ "internal_user_label",
+ "external_user_label",
+ "my_instances",
+ "all_instances",
+ "is_logged_in",
+ )
class ArchiveView(UserProfileView):
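The hunk above replaces the single `instances` list with deduplicated `my_instances` and `all_instances` context variables. A compact sketch of the same split, written as a hypothetical helper where `teaching`, `assisting`, `enrolled`, and `visible` stand in for the querysets used in `get_common_objects`:

```
# Hypothetical helper: same de-duplicated split as the patch, using a set of ids.
def split_instances(teaching, assisting, enrolled, visible):
    my_instances = []
    seen = set()
    for queryset in (teaching, assisting, enrolled):
        for instance in queryset:
            if instance.id not in seen:
                seen.add(instance.id)
                my_instances.append(instance)
    all_instances = [c for c in visible if c.id not in seen]
    return my_instances, all_instances
```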
|
{"golden_diff": "diff --git a/course/views.py b/course/views.py\n--- a/course/views.py\n+++ b/course/views.py\n@@ -33,17 +33,42 @@\n self.welcome_text = settings_text('WELCOME_TEXT')\n self.internal_user_label = settings_text('INTERNAL_USER_LABEL')\n self.external_user_label = settings_text('EXTERNAL_USER_LABEL')\n- self.instances = []\n- prio2 = []\n- treshold = timezone.now() - datetime.timedelta(days=10)\n- for instance in CourseInstance.objects.get_visible(self.request.user)\\\n- .filter(ending_time__gte=timezone.now()):\n- if instance.starting_time > treshold:\n- self.instances += [instance]\n- else:\n- prio2 += [instance]\n- self.instances += prio2\n- self.note(\"welcome_text\", \"internal_user_label\", \"external_user_label\", \"instances\")\n+ my_instances = []\n+ all_instances = []\n+ end_threshold = timezone.now() - datetime.timedelta(days=30)\n+ user = self.request.user\n+ is_logged_in = False\n+\n+ if user and user.is_authenticated:\n+ is_logged_in = True\n+ for instance in (CourseInstance.objects\n+ .filter(course__teachers=user.userprofile,\n+ ending_time__gte=end_threshold)\n+ .all()):\n+ my_instances.append(instance)\n+\n+ for instance in user.userprofile.assisting_courses.all().filter(ending_time__gte=end_threshold):\n+ if instance not in my_instances:\n+ my_instances.append(instance)\n+ \n+ for instance in user.userprofile.enrolled.all().filter(ending_time__gte=end_threshold):\n+ if instance not in my_instances:\n+ my_instances.append(instance)\n+ \n+ all_instances = CourseInstance.objects.get_visible(user).filter(ending_time__gte=end_threshold)\n+ all_instances = [c for c in all_instances if c not in my_instances]\n+ \n+ self.all_instances = all_instances\n+ self.my_instances = my_instances\n+ self.is_logged_in = is_logged_in\n+\n+ self.note(\"welcome_text\", \n+ \"internal_user_label\", \n+ \"external_user_label\",\n+ \"my_instances\",\n+ \"all_instances\",\n+ \"is_logged_in\",\n+ )\n \n \n class ArchiveView(UserProfileView):\n", "issue": "A+ front page\nThe A+ front page should display the name of the course instance in addition to the course name and code; Multiple instances of the same course are hard to separate in the A+ front page because the only different parts are the open/close dates and the image. Course instances have names, so that should be displayed in the front page too. 
Jaakko has some ideas; contact him!\n", "before_files": [{"content": "import datetime\n\nimport icalendar\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import html\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom authorization.permissions import ACCESS\nfrom exercise.cache.hierarchy import NoSuchContent\nfrom exercise.models import LearningObject\nfrom lib.helpers import settings_text\nfrom lib.viewbase import BaseTemplateView, BaseRedirectMixin, BaseFormView, BaseView\nfrom userprofile.viewbase import UserProfileView\nfrom .forms import GroupsForm, GroupSelectForm\nfrom .models import CourseInstance, Enrollment\nfrom .permissions import EnrollInfoVisiblePermission\nfrom .renders import group_info_context\nfrom .viewbase import CourseModuleBaseView, CourseInstanceMixin, EnrollableViewMixin\n\n\nclass HomeView(UserProfileView):\n access_mode = ACCESS.ANONYMOUS\n template_name = \"course/index.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.welcome_text = settings_text('WELCOME_TEXT')\n self.internal_user_label = settings_text('INTERNAL_USER_LABEL')\n self.external_user_label = settings_text('EXTERNAL_USER_LABEL')\n self.instances = []\n prio2 = []\n treshold = timezone.now() - datetime.timedelta(days=10)\n for instance in CourseInstance.objects.get_visible(self.request.user)\\\n .filter(ending_time__gte=timezone.now()):\n if instance.starting_time > treshold:\n self.instances += [instance]\n else:\n prio2 += [instance]\n self.instances += prio2\n self.note(\"welcome_text\", \"internal_user_label\", \"external_user_label\", \"instances\")\n\n\nclass ArchiveView(UserProfileView):\n access_mode = ACCESS.ANONYMOUS\n template_name = \"course/archive.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.instances = CourseInstance.objects.get_visible(self.request.user)\n self.note(\"instances\")\n\nclass InstanceView(EnrollableViewMixin, BaseTemplateView):\n access_mode = ACCESS.STUDENT\n # ACCESS.STUDENT requires users to log in, but the access mode is dropped\n # in public courses. 
CourseVisiblePermission has more restrictions as well.\n template_name = \"course/course.html\"\n\n def handle_no_permission(self):\n if self.request.user.is_authenticated \\\n and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED:\n # The course instance is visible to only enrolled students, so\n # redirect the user to the enroll page instead of showing\n # a 403 Forbidden error.\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n\n def get(self, request, *args, **kwargs):\n # external LTI Tool Providers may return the user to the course instance view\n # with a message given in GET query parameters\n lti_error_msg = request.GET.get('lti_errormsg')\n lti_msg = request.GET.get('lti_msg')\n # message HTML is not escaped in the templates so escape it here\n if lti_error_msg:\n messages.error(request, html.escape(lti_error_msg))\n elif lti_msg:\n messages.info(request, html.escape(lti_msg))\n\n return super().get(request, *args, **kwargs)\n\n\nclass Enroll(EnrollableViewMixin, BaseRedirectMixin, BaseTemplateView):\n permission_classes = [EnrollInfoVisiblePermission]\n course_permission_classes = []\n template_name = \"course/enroll.html\"\n\n def post(self, request, *args, **kwargs):\n\n if self.is_student or not self.enrollable:\n messages.error(self.request, _(\"You cannot enroll, or have already enrolled, in this course.\"))\n raise PermissionDenied()\n\n if not self.instance.is_enrollment_open():\n messages.error(self.request, _(\"The enrollment is not open.\"))\n raise PermissionDenied()\n\n # Support enrollment questionnaires.\n exercise = LearningObject.objects.find_enrollment_exercise(\n self.instance, self.profile)\n if exercise:\n return self.redirect(exercise.get_absolute_url())\n\n self.instance.enroll_student(self.request.user)\n return self.redirect(self.instance.get_absolute_url())\n\n\nclass ModuleView(CourseModuleBaseView):\n template_name = \"course/module.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.now = timezone.now()\n try:\n self.children = self.content.flat_module(self.module)\n cur, tree, prev, nex = self.content.find(self.module)\n self.previous = prev\n self.current = cur\n self.next = nex\n except NoSuchContent:\n raise Http404\n self.note('now', 'children', 'previous', 'current', 'next')\n\n\nclass CalendarExport(CourseInstanceMixin, BaseView):\n\n def get(self, request, *args, **kwargs):\n cal = icalendar.Calendar()\n cal.add('prodid', '-// {} calendar //'.format(settings.BRAND_NAME))\n cal.add('version', '2.0')\n for module in self.instance.course_modules.all():\n event = icalendar.Event()\n event.add('summary', module.name)\n event.add('dtstart',\n module.closing_time - datetime.timedelta(hours=1))\n event.add('dtend', module.closing_time)\n event.add('dtstamp', module.closing_time)\n event['uid'] = \"module/\" + str(module.id) + \"/A+\"\n cal.add_component(event)\n\n return HttpResponse(cal.to_ical(),\n content_type=\"text/calendar; charset=utf-8\")\n\n\nclass GroupsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.ENROLLED\n template_name = \"course/groups.html\"\n form_class = GroupsForm\n\n def get_common_objects(self):\n super().get_common_objects()\n self.enrollment = self.instance.get_enrollment_for(self.request.user)\n self.groups = list(self.profile.groups.filter(course_instance=self.instance))\n self.note('enrollment','groups')\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"profile\"] = self.profile\n 
kwargs[\"instance\"] = self.instance\n kwargs[\"content\"] = self.content\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url('groups')\n\n def form_valid(self, form):\n form.save()\n messages.success(self.request, _(\"A new student group was created.\"))\n return super().form_valid(form)\n\n\nclass GroupSelect(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.ENROLLED\n form_class = GroupSelectForm\n template_name = \"course/_group_info.html\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"profile\"] = self.profile\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_absolute_url()\n\n def get(self, request, *args, **kwargs):\n return self.http_method_not_allowed(request, *args, **kwargs)\n\n def form_invalid(self, form):\n return HttpResponse('Invalid group selection')\n\n def form_valid(self, form):\n enrollment = form.save()\n if self.request.is_ajax():\n return self.render_to_response(self.get_context_data(\n **group_info_context(enrollment.selected_group, self.profile)))\n return super().form_valid(form)\n", "path": "course/views.py"}]}
| 2,727 | 503 |
gh_patches_debug_18035
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2628
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failed ACH transfers aren't reflected in user balance
When payday initiates an ACH transfer for a user's balance, that user's balance is immediately reduced by the amount of the transfer. The problem comes on the rare occasions when the ACH fails. The only notification we get (that I know about) is an email from Balanced about ACH failures. We need to restore funds to the user's account when the transfer did not, in fact, occur.
</issue>
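The patch further down only exempts `/callbacks/` from CSRF so an external failure notification can reach the app; the bookkeeping step it enables would then credit the balance back. A purely hypothetical sketch of that credit step, with table and column names assumed rather than taken from Gratipay's schema:

```
# Hypothetical sketch only: table/column names are assumptions, not Gratipay's schema.
def restore_failed_ach(connection, username, amount):
    """Credit `amount` back to `username` after Balanced reports a failed ACH credit."""
    with connection.cursor() as cursor:
        cursor.execute(
            "UPDATE participants SET balance = balance + %s WHERE username = %s",
            (amount, username),
        )
    connection.commit()
```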
<code>
[start of gittip/security/csrf.py]
1 """Cross Site Request Forgery middleware, borrowed from Django.
2
3 See also:
4
5 https://github.com/django/django/blob/master/django/middleware/csrf.py
6 https://docs.djangoproject.com/en/dev/ref/contrib/csrf/
7 https://github.com/gittip/www.gittip.com/issues/88
8
9 """
10
11 from datetime import timedelta
12 import re
13 import urlparse
14 from aspen import log_dammit
15
16
17 #from django.utils.cache import patch_vary_headers
18 cc_delim_re = re.compile(r'\s*,\s*')
19 def patch_vary_headers(response, newheaders):
20 """
21 Adds (or updates) the "Vary" header in the given HttpResponse object.
22 newheaders is a list of header names that should be in "Vary". Existing
23 headers in "Vary" aren't removed.
24 """
25 # Note that we need to keep the original order intact, because cache
26 # implementations may rely on the order of the Vary contents in, say,
27 # computing an MD5 hash.
28 if 'Vary' in response.headers:
29 vary_headers = cc_delim_re.split(response.headers['Vary'])
30 else:
31 vary_headers = []
32 # Use .lower() here so we treat headers as case-insensitive.
33 existing_headers = set([header.lower() for header in vary_headers])
34 additional_headers = [newheader for newheader in newheaders
35 if newheader.lower() not in existing_headers]
36 response.headers['Vary'] = ', '.join(vary_headers + additional_headers)
37
38
39 #from django.utils.http import same_origin
40 def same_origin(url1, url2):
41 """
42 Checks if two URLs are 'same-origin'
43 """
44 p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
45 return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
46
47
48 from aspen import Response
49 from crypto import constant_time_compare, get_random_string
50
51 REASON_NO_REFERER = "Referer checking failed - no Referer."
52 REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
53 REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
54 REASON_BAD_TOKEN = "CSRF token missing or incorrect."
55
56 TOKEN_LENGTH = 32
57 CSRF_TIMEOUT = timedelta(days=7)
58
59
60 def _get_new_csrf_key():
61 return get_random_string(TOKEN_LENGTH)
62
63
64 def _sanitize_token(token):
65 # Allow only alphanum, and ensure we return a 'str' for the sake
66 # of the post processing middleware.
67 if len(token) > TOKEN_LENGTH:
68 return _get_new_csrf_key()
69 token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))
70 if token == "":
71 # In case the cookie has been truncated to nothing at some point.
72 return _get_new_csrf_key()
73 return token
74
75 def _is_secure(request):
76 import gittip
77 return gittip.canonical_scheme == 'https'
78
79 def _get_host(request):
80 """Returns the HTTP host using the request headers.
81 """
82 return request.headers.get('X-Forwarded-Host', request.headers['Host'])
83
84
85
86 def inbound(request):
87 """Given a Request object, reject it if it's a forgery.
88 """
89 if request.line.uri.startswith('/assets/'): return
90
91 try:
92 csrf_token = request.headers.cookie.get('csrf_token')
93 csrf_token = '' if csrf_token is None else csrf_token.value
94 csrf_token = _sanitize_token(csrf_token)
95 except KeyError:
96 csrf_token = _get_new_csrf_key()
97
98 request.context['csrf_token'] = csrf_token
99
100 # Assume that anything not defined as 'safe' by RC2616 needs protection
101 if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
102
103 if _is_secure(request):
104 # Suppose user visits http://example.com/
105 # An active network attacker (man-in-the-middle, MITM) sends a
106 # POST form that targets https://example.com/detonate-bomb/ and
107 # submits it via JavaScript.
108 #
109 # The attacker will need to provide a CSRF cookie and token, but
110 # that's no problem for a MITM and the session-independent
111 # nonce we're using. So the MITM can circumvent the CSRF
112 # protection. This is true for any HTTP connection, but anyone
113 # using HTTPS expects better! For this reason, for
114 # https://example.com/ we need additional protection that treats
115 # http://example.com/ as completely untrusted. Under HTTPS,
116 # Barth et al. found that the Referer header is missing for
117 # same-domain requests in only about 0.2% of cases or less, so
118 # we can use strict Referer checking.
119 referer = request.headers.get('Referer')
120 if referer is None:
121 raise Response(403, REASON_NO_REFERER)
122
123 good_referer = 'https://%s/' % _get_host(request)
124 if not same_origin(referer, good_referer):
125 reason = REASON_BAD_REFERER % (referer, good_referer)
126 log_dammit(reason)
127 raise Response(403, reason)
128
129 if csrf_token is None:
130 raise Response(403, REASON_NO_CSRF_COOKIE)
131
132 # Check non-cookie token for match.
133 request_csrf_token = ""
134 if request.line.method == "POST":
135 request_csrf_token = request.body.get('csrf_token', '')
136
137 if request_csrf_token == "":
138 # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,
139 # and possible for PUT/DELETE.
140 request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')
141
142 if not constant_time_compare(request_csrf_token, csrf_token):
143 raise Response(403, REASON_BAD_TOKEN)
144
145
146 def outbound(request, response):
147 """Store the latest CSRF token as a cookie.
148 """
149 csrf_token = request.context.get('csrf_token')
150 if csrf_token:
151 response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)
152
153 # Content varies with the CSRF cookie, so set the Vary header.
154 patch_vary_headers(response, ('Cookie',))
155
[end of gittip/security/csrf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gittip/security/csrf.py b/gittip/security/csrf.py
--- a/gittip/security/csrf.py
+++ b/gittip/security/csrf.py
@@ -87,15 +87,14 @@
"""Given a Request object, reject it if it's a forgery.
"""
if request.line.uri.startswith('/assets/'): return
+ if request.line.uri.startswith('/callbacks/'): return
try:
- csrf_token = request.headers.cookie.get('csrf_token')
- csrf_token = '' if csrf_token is None else csrf_token.value
- csrf_token = _sanitize_token(csrf_token)
+ csrf_token = _sanitize_token(request.headers.cookie['csrf_token'].value)
except KeyError:
- csrf_token = _get_new_csrf_key()
+ csrf_token = None
- request.context['csrf_token'] = csrf_token
+ request.context['csrf_token'] = csrf_token or _get_new_csrf_key()
# Assume that anything not defined as 'safe' by RC2616 needs protection
if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
|
{"golden_diff": "diff --git a/gittip/security/csrf.py b/gittip/security/csrf.py\n--- a/gittip/security/csrf.py\n+++ b/gittip/security/csrf.py\n@@ -87,15 +87,14 @@\n \"\"\"Given a Request object, reject it if it's a forgery.\n \"\"\"\n if request.line.uri.startswith('/assets/'): return\n+ if request.line.uri.startswith('/callbacks/'): return\n \n try:\n- csrf_token = request.headers.cookie.get('csrf_token')\n- csrf_token = '' if csrf_token is None else csrf_token.value\n- csrf_token = _sanitize_token(csrf_token)\n+ csrf_token = _sanitize_token(request.headers.cookie['csrf_token'].value)\n except KeyError:\n- csrf_token = _get_new_csrf_key()\n+ csrf_token = None\n \n- request.context['csrf_token'] = csrf_token\n+ request.context['csrf_token'] = csrf_token or _get_new_csrf_key()\n \n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n", "issue": "Failed ACH transfers aren't reflected in user balance\nWhen payday initiates an ACH transfer for a user's balance, that user's balance is immediately reduced by the amount of the transfer. The problem come on the rare occasions where the ACH fails. The only notification we get (that I know about) is an email from Balanced about ACH failures. We need to restore funds to the user's account when the transfer did not, in fact, occur.\n\n", "before_files": [{"content": "\"\"\"Cross Site Request Forgery middleware, borrowed from Django.\n\nSee also:\n\n https://github.com/django/django/blob/master/django/middleware/csrf.py\n https://docs.djangoproject.com/en/dev/ref/contrib/csrf/\n https://github.com/gittip/www.gittip.com/issues/88\n\n\"\"\"\n\nfrom datetime import timedelta\nimport re\nimport urlparse\nfrom aspen import log_dammit\n\n\n#from django.utils.cache import patch_vary_headers\ncc_delim_re = re.compile(r'\\s*,\\s*')\ndef patch_vary_headers(response, newheaders):\n \"\"\"\n Adds (or updates) the \"Vary\" header in the given HttpResponse object.\n newheaders is a list of header names that should be in \"Vary\". 
Existing\n headers in \"Vary\" aren't removed.\n \"\"\"\n # Note that we need to keep the original order intact, because cache\n # implementations may rely on the order of the Vary contents in, say,\n # computing an MD5 hash.\n if 'Vary' in response.headers:\n vary_headers = cc_delim_re.split(response.headers['Vary'])\n else:\n vary_headers = []\n # Use .lower() here so we treat headers as case-insensitive.\n existing_headers = set([header.lower() for header in vary_headers])\n additional_headers = [newheader for newheader in newheaders\n if newheader.lower() not in existing_headers]\n response.headers['Vary'] = ', '.join(vary_headers + additional_headers)\n\n\n#from django.utils.http import same_origin\ndef same_origin(url1, url2):\n \"\"\"\n Checks if two URLs are 'same-origin'\n \"\"\"\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)\n\n\nfrom aspen import Response\nfrom crypto import constant_time_compare, get_random_string\n\nREASON_NO_REFERER = \"Referer checking failed - no Referer.\"\nREASON_BAD_REFERER = \"Referer checking failed - %s does not match %s.\"\nREASON_NO_CSRF_COOKIE = \"CSRF cookie not set.\"\nREASON_BAD_TOKEN = \"CSRF token missing or incorrect.\"\n\nTOKEN_LENGTH = 32\nCSRF_TIMEOUT = timedelta(days=7)\n\n\ndef _get_new_csrf_key():\n return get_random_string(TOKEN_LENGTH)\n\n\ndef _sanitize_token(token):\n # Allow only alphanum, and ensure we return a 'str' for the sake\n # of the post processing middleware.\n if len(token) > TOKEN_LENGTH:\n return _get_new_csrf_key()\n token = re.sub('[^a-zA-Z0-9]+', '', str(token.decode('ascii', 'ignore')))\n if token == \"\":\n # In case the cookie has been truncated to nothing at some point.\n return _get_new_csrf_key()\n return token\n\ndef _is_secure(request):\n import gittip\n return gittip.canonical_scheme == 'https'\n\ndef _get_host(request):\n \"\"\"Returns the HTTP host using the request headers.\n \"\"\"\n return request.headers.get('X-Forwarded-Host', request.headers['Host'])\n\n\n\ndef inbound(request):\n \"\"\"Given a Request object, reject it if it's a forgery.\n \"\"\"\n if request.line.uri.startswith('/assets/'): return\n\n try:\n csrf_token = request.headers.cookie.get('csrf_token')\n csrf_token = '' if csrf_token is None else csrf_token.value\n csrf_token = _sanitize_token(csrf_token)\n except KeyError:\n csrf_token = _get_new_csrf_key()\n\n request.context['csrf_token'] = csrf_token\n\n # Assume that anything not defined as 'safe' by RC2616 needs protection\n if request.line.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n\n if _is_secure(request):\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. 
found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = request.headers.get('Referer')\n if referer is None:\n raise Response(403, REASON_NO_REFERER)\n\n good_referer = 'https://%s/' % _get_host(request)\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n log_dammit(reason)\n raise Response(403, reason)\n\n if csrf_token is None:\n raise Response(403, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.line.method == \"POST\":\n request_csrf_token = request.body.get('csrf_token', '')\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRF-TOKEN, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.headers.get('X-CSRF-TOKEN', '')\n\n if not constant_time_compare(request_csrf_token, csrf_token):\n raise Response(403, REASON_BAD_TOKEN)\n\n\ndef outbound(request, response):\n \"\"\"Store the latest CSRF token as a cookie.\n \"\"\"\n csrf_token = request.context.get('csrf_token')\n if csrf_token:\n response.set_cookie('csrf_token', csrf_token, expires=CSRF_TIMEOUT, httponly=False)\n\n # Content varies with the CSRF cookie, so set the Vary header.\n patch_vary_headers(response, ('Cookie',))\n", "path": "gittip/security/csrf.py"}]}
| 2,376 | 252 |
gh_patches_debug_14676
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-6751
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[APP SUBMITTED]: AttributeError: 'NoneType' object has no attribute 'find_all'
### INFO
**Python Version**: `3.7.3rc1 (default, Mar 13 2019, 11:01:15) [GCC 8.2.0]`
**Operating System**: `Linux-4.9.35-v7+-armv7l-with-debian-buster-sid`
**Locale**: `UTF-8`
**Branch**: [develop](../tree/develop)
**Database**: `44.14`
**Commit**: pymedusa/Medusa@429bbf9f1c90de20036a86e3512ed895a0fe5f2c
**Link to Log**: https://gist.github.com/63aae653b5bade13d837f97ec5089a0f
### ERROR
<pre>
2019-05-26 11:01:29 ERROR SEARCHQUEUE-DAILY-SEARCH :: [BTDB] :: [429bbf9] DailySearchQueueItem Exception, error: 'NoneType' object has no attribute 'find_all'
Traceback (most recent call last):
File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/queue.py#L263">medusa/search/queue.py</a>", line 263, in run
found_results = search_for_needed_episodes(self.scheduler_start_time, force=self.force)
File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/core.py#L524">medusa/search/core.py</a>", line 524, in search_for_needed_episodes
cur_provider.cache.update_cache(scheduler_start_time)
File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L207">medusa/tv/cache.py</a>", line 207, in update_cache
data = self._get_rss_data()
File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L190">medusa/tv/cache.py</a>", line 190, in _get_rss_data
return {'entries': self.provider.search(self.search_params)}
File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L78">medusa/providers/torrent/html/btdb.py</a>", line 78, in search
results += self.parse(response.text, mode)
File "/home/pi/Medusa/<a href="../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L99">medusa/providers/torrent/html/btdb.py</a>", line 99, in parse
torrent_rows = table_body.find_all('li', class_='recent-item')
AttributeError: 'NoneType' object has no attribute 'find_all'
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
</issue>
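The traceback shows `html.find('div', class_='recent')` returning `None` before `find_all` is called. A minimal defensive sketch of the parse, assuming BeautifulSoup semantics rather than the project's exact code:

```
# Minimal sketch: guard against a missing container before calling find_all.
from bs4 import BeautifulSoup

def parse_rows(data, mode):
    html = BeautifulSoup(data, 'html5lib')
    cls_name = 'search-ret' if mode != 'RSS' else 'recent'
    table_body = html.find('div', class_=cls_name)
    if table_body is None:  # layout changed or the page came back empty
        return []
    return table_body.find_all('li', class_='{0}-item'.format(cls_name))
```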
<code>
[start of medusa/providers/torrent/html/btdb.py]
1 # coding=utf-8
2
3 """Provider code for BTDB."""
4
5 from __future__ import unicode_literals
6
7 import logging
8
9 from medusa import tv
10 from medusa.bs4_parser import BS4Parser
11 from medusa.helper.common import convert_size
12 from medusa.logger.adapters.style import BraceAdapter
13 from medusa.providers.torrent.torrent_provider import TorrentProvider
14
15 from requests.compat import urljoin
16
17 log = BraceAdapter(logging.getLogger(__name__))
18 log.logger.addHandler(logging.NullHandler())
19
20
21 class BTDBProvider(TorrentProvider):
22 """BTDB Torrent provider."""
23
24 def __init__(self):
25 """Initialize the class."""
26 super(BTDBProvider, self).__init__('BTDB')
27
28 # Credentials
29 self.public = True
30
31 # URLs
32 self.url = 'https://btdb.eu'
33 self.urls = {
34 'daily': urljoin(self.url, 'recent'),
35 }
36
37 # Miscellaneous Options
38
39 # Cache
40 self.cache = tv.Cache(self, min_time=20)
41
42 def search(self, search_strings, age=0, ep_obj=None, **kwargs):
43 """
44 Search a provider and parse the results.
45
46 :param search_strings: A dict with mode (key) and the search value (value)
47 :param age: Not used
48 :param ep_obj: Not used
49 :returns: A list of search results (structure)
50 """
51 results = []
52
53 # Search Params
54 search_params = {
55 'category': 'show',
56 }
57
58 for mode in search_strings:
59 log.debug('Search mode: {0}', mode)
60
61 for search_string in search_strings[mode]:
62 search_url = self.urls['daily']
63
64 if mode != 'RSS':
65 search_url = self.url
66
67 search_params['search'] = search_string
68 search_params['sort'] = 'popular'
69
70 log.debug('Search string: {search}',
71 {'search': search_string})
72
73 response = self.session.get(search_url, params=search_params)
74 if not response or not response.text:
75 log.debug('No data returned from provider')
76 continue
77
78 results += self.parse(response.text, mode)
79
80 return results
81
82 def parse(self, data, mode):
83 """
84 Parse search results for items.
85
86 :param data: The raw response from a search
87 :param mode: The current mode used to search, e.g. RSS
88
89 :return: A list of items found
90 """
91 items = []
92
93 with BS4Parser(data, 'html5lib') as html:
94 if mode != 'RSS':
95 table_body = html.find('div', class_='search-ret')
96 torrent_rows = table_body.find_all('li', class_='search-ret-item')
97 else:
98 table_body = html.find('div', class_='recent')
99 torrent_rows = table_body.find_all('li', class_='recent-item')
100
101 # Continue only if at least one release is found
102 if not table_body:
103 log.debug('Data returned from provider does not contain any torrents')
104 return items
105
106 for row in torrent_rows:
107 try:
108
109 title = row.h2.find('a').get('title')
110 download_url = row.div.find('a').get('href')
111 if not all([title, download_url]):
112 continue
113
114 download_url += self._custom_trackers
115
116 spans = row.find('div').find_all('span')
117
118 seeders = int(spans[3].get_text().replace(',', ''))
119 leechers = int(spans[4].get_text().replace(',', ''))
120
121 torrent_size = spans[0].get_text()
122 size = convert_size(torrent_size, default=-1)
123
124 pubdate_raw = spans[2].get_text()
125 pubdate = self.parse_pubdate(pubdate_raw)
126
127 item = {
128 'title': title,
129 'link': download_url,
130 'size': size,
131 'seeders': seeders,
132 'leechers': leechers,
133 'pubdate': pubdate,
134 }
135
136 if mode != 'RSS':
137 log.debug('Found result: {0} with {1} seeders and {2} leechers',
138 title, seeders, leechers)
139
140 items.append(item)
141 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
142 log.exception('Failed parsing provider.')
143
144 return items
145
146
147 provider = BTDBProvider()
148
[end of medusa/providers/torrent/html/btdb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/providers/torrent/html/btdb.py b/medusa/providers/torrent/html/btdb.py
--- a/medusa/providers/torrent/html/btdb.py
+++ b/medusa/providers/torrent/html/btdb.py
@@ -91,12 +91,11 @@
items = []
with BS4Parser(data, 'html5lib') as html:
- if mode != 'RSS':
- table_body = html.find('div', class_='search-ret')
- torrent_rows = table_body.find_all('li', class_='search-ret-item')
- else:
- table_body = html.find('div', class_='recent')
- torrent_rows = table_body.find_all('li', class_='recent-item')
+ cls_name = 'search-ret' if mode != 'RSS' else 'recent'
+ table_body = html.find('div', class_=cls_name)
+ torrent_rows = table_body.find_all(
+ 'li', class_='{0}-item'.format(cls_name)
+ ) if table_body else []
# Continue only if at least one release is found
if not table_body:
|
{"golden_diff": "diff --git a/medusa/providers/torrent/html/btdb.py b/medusa/providers/torrent/html/btdb.py\n--- a/medusa/providers/torrent/html/btdb.py\n+++ b/medusa/providers/torrent/html/btdb.py\n@@ -91,12 +91,11 @@\n items = []\n \n with BS4Parser(data, 'html5lib') as html:\n- if mode != 'RSS':\n- table_body = html.find('div', class_='search-ret')\n- torrent_rows = table_body.find_all('li', class_='search-ret-item')\n- else:\n- table_body = html.find('div', class_='recent')\n- torrent_rows = table_body.find_all('li', class_='recent-item')\n+ cls_name = 'search-ret' if mode != 'RSS' else 'recent'\n+ table_body = html.find('div', class_=cls_name)\n+ torrent_rows = table_body.find_all(\n+ 'li', class_='{0}-item'.format(cls_name)\n+ ) if table_body else []\n \n # Continue only if at least one release is found\n if not table_body:\n", "issue": "[APP SUBMITTED]: AttributeError: 'NoneType' object has no attribute 'find_all'\n\n### INFO\n**Python Version**: `3.7.3rc1 (default, Mar 13 2019, 11:01:15) [GCC 8.2.0]`\n**Operating System**: `Linux-4.9.35-v7+-armv7l-with-debian-buster-sid`\n**Locale**: `UTF-8`\n**Branch**: [develop](../tree/develop)\n**Database**: `44.14`\n**Commit**: pymedusa/Medusa@429bbf9f1c90de20036a86e3512ed895a0fe5f2c\n**Link to Log**: https://gist.github.com/63aae653b5bade13d837f97ec5089a0f\n### ERROR\n<pre>\n2019-05-26 11:01:29 ERROR SEARCHQUEUE-DAILY-SEARCH :: [BTDB] :: [429bbf9] DailySearchQueueItem Exception, error: 'NoneType' object has no attribute 'find_all'\nTraceback (most recent call last):\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/queue.py#L263\">medusa/search/queue.py</a>\", line 263, in run\n found_results = search_for_needed_episodes(self.scheduler_start_time, force=self.force)\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/search/core.py#L524\">medusa/search/core.py</a>\", line 524, in search_for_needed_episodes\n cur_provider.cache.update_cache(scheduler_start_time)\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L207\">medusa/tv/cache.py</a>\", line 207, in update_cache\n data = self._get_rss_data()\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/tv/cache.py#L190\">medusa/tv/cache.py</a>\", line 190, in _get_rss_data\n return {'entries': self.provider.search(self.search_params)}\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L78\">medusa/providers/torrent/html/btdb.py</a>\", line 78, in search\n results += self.parse(response.text, mode)\n File \"/home/pi/Medusa/<a href=\"../blob/429bbf9f1c90de20036a86e3512ed895a0fe5f2c/medusa/providers/torrent/html/btdb.py#L99\">medusa/providers/torrent/html/btdb.py</a>\", line 99, in parse\n torrent_rows = table_body.find_all('li', class_='recent-item')\nAttributeError: 'NoneType' object has no attribute 'find_all'\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for BTDB.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nfrom requests.compat import urljoin\n\nlog = 
BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BTDBProvider(TorrentProvider):\n \"\"\"BTDB Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BTDBProvider, self).__init__('BTDB')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://btdb.eu'\n self.urls = {\n 'daily': urljoin(self.url, 'recent'),\n }\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self, min_time=20)\n\n def search(self, search_strings, age=0, ep_obj=None, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n\n # Search Params\n search_params = {\n 'category': 'show',\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n for search_string in search_strings[mode]:\n search_url = self.urls['daily']\n\n if mode != 'RSS':\n search_url = self.url\n\n search_params['search'] = search_string\n search_params['sort'] = 'popular'\n\n log.debug('Search string: {search}',\n {'search': search_string})\n\n response = self.session.get(search_url, params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n if mode != 'RSS':\n table_body = html.find('div', class_='search-ret')\n torrent_rows = table_body.find_all('li', class_='search-ret-item')\n else:\n table_body = html.find('div', class_='recent')\n torrent_rows = table_body.find_all('li', class_='recent-item')\n\n # Continue only if at least one release is found\n if not table_body:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for row in torrent_rows:\n try:\n\n title = row.h2.find('a').get('title')\n download_url = row.div.find('a').get('href')\n if not all([title, download_url]):\n continue\n\n download_url += self._custom_trackers\n\n spans = row.find('div').find_all('span')\n\n seeders = int(spans[3].get_text().replace(',', ''))\n leechers = int(spans[4].get_text().replace(',', ''))\n\n torrent_size = spans[0].get_text()\n size = convert_size(torrent_size, default=-1)\n\n pubdate_raw = spans[2].get_text()\n pubdate = self.parse_pubdate(pubdate_raw)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.exception('Failed parsing provider.')\n\n return items\n\n\nprovider = BTDBProvider()\n", "path": "medusa/providers/torrent/html/btdb.py"}]}
| 2,745 | 253 |
gh_patches_debug_31047
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-11189
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0
### Package and Environment Details
* Package Name/Version: **libpciaccess/0.16**
* Operating System+version: **Fedora 34 x86_64**
* Compiler+version: **gcc 11.3.1**
* Conan version: **conan 1.49.0**
* Python version: **Python 3.9.13**
### Conan profile
```
Configuration for profile default:
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release
[options]
[conf]
[build_requires]
[env]
```
### Steps to reproduce
Simply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0.
Expected result:
libpciaccess/0.16 is successfully built and made available in conan cache.
Actual result:
Build fails with the following error:
```
libpciaccess/0.16:
libpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed
libpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646
ERROR: libpciaccess/0.16: Error in build() method, line 66
autotools = Autotools(self, build_script_folder=self._source_subfolder)
TypeError: __init__() got an unexpected keyword argument 'build_script_folder'
```
The problem appears to be that the change merged in #11021, which reverts the fix introduced for #10909 in #10910, hasn't been updated in Conan Center yet, possibly because it reverts to a previous version.
### Logs
_No response_
</issue>
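The failing revision passes `build_script_folder` to the `Autotools` constructor, which Conan 1.49 no longer accepts; the recipe source below already passes it to `configure()` instead. A hedged sketch of the 1.49-compatible call sequence:

```
# Sketch of the call pattern that works on Conan 1.49 (mirrors the recipe below);
# shown as it would appear inside the ConanFile's build() method.
from conan.tools.gnu import Autotools

def build(self):
    autotools = Autotools(self)  # no build_script_folder argument here on 1.49
    autotools.configure(build_script_folder=self._source_subfolder)
    autotools.make()
```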
<code>
[start of recipes/libpciaccess/all/conanfile.py]
1 import os
2
3 from conan.tools.gnu import Autotools, AutotoolsToolchain
4 from conans import ConanFile, tools
5 from conans.errors import ConanInvalidConfiguration
6
7 required_conan_version = ">=1.33.0"
8
9
10 class LibPciAccessConan(ConanFile):
11 name = "libpciaccess"
12 description = "Generic PCI access library"
13 topics = ("pci", "xorg")
14 url = "https://github.com/conan-io/conan-center-index"
15 homepage = "https://gitlab.freedesktop.org/xorg/lib/libpciaccess"
16 license = "MIT", "X11"
17
18 settings = "os", "arch", "compiler", "build_type"
19 options = {"shared": [True, False], "fPIC": [True, False]}
20 default_options = {"shared": False, "fPIC": True}
21
22 _source_subfolder = "source_subfolder"
23 _build_subfolder = "build_subfolder"
24
25 def validate(self):
26 def is_supported(settings):
27 if settings.os in ("Linux", "FreeBSD", "SunOS"):
28 return True
29 return settings.os == "Windows" and settings.get_safe("os.subsystem") == "cygwin"
30 if not is_supported(self.settings):
31 raise ConanInvalidConfiguration("Unsupported architecture.")
32
33 def config_options(self):
34 if self.settings.os == "Windows":
35 del self.options.fPIC
36
37 def configure(self):
38 if self.options.shared:
39 del self.options.fPIC
40 del self.settings.compiler.libcxx
41 del self.settings.compiler.cppstd
42
43 def build_requirements(self):
44 self.build_requires("pkgconf/1.7.4")
45 self.build_requires("xorg-macros/1.19.3")
46 self.build_requires("libtool/2.4.6")
47
48 def generate(self):
49 tc = AutotoolsToolchain(self)
50 tc.default_configure_install_args = True
51 tc.generate()
52
53 def source(self):
54 tools.get(**self.conan_data["sources"][self.version],
55 strip_root=True, destination=self._source_subfolder)
56
57 def build(self):
58 # autoreconf
59 self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"),
60 win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)
61
62 autotools = Autotools(self)
63 autotools.configure(build_script_folder=self._source_subfolder)
64 autotools.make()
65
66 def package(self):
67 self.copy(pattern="COPYING", dst="licenses",
68 src=self._source_subfolder)
69
70 autotools = Autotools(self)
71 autotools.install()
72
73 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
74 tools.remove_files_by_mask(os.path.join(
75 self.package_folder, "lib"), "*.la")
76
77 def package_info(self):
78 self.cpp_info.libs = tools.collect_libs(self)
79 self.cpp_info.set_property("pkg_config_name", "pciaccess")
80
[end of recipes/libpciaccess/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py
--- a/recipes/libpciaccess/all/conanfile.py
+++ b/recipes/libpciaccess/all/conanfile.py
@@ -1,10 +1,10 @@
import os
-
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
-required_conan_version = ">=1.33.0"
+
+required_conan_version = ">=1.44.0"
class LibPciAccessConan(ConanFile):
@@ -19,8 +19,9 @@
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
- _source_subfolder = "source_subfolder"
- _build_subfolder = "build_subfolder"
+ @property
+ def _source_subfolder(self):
+ return "source_subfolder"
def validate(self):
def is_supported(settings):
@@ -55,7 +56,6 @@
strip_root=True, destination=self._source_subfolder)
def build(self):
- # autoreconf
self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"),
win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)
@@ -75,5 +75,5 @@
self.package_folder, "lib"), "*.la")
def package_info(self):
- self.cpp_info.libs = tools.collect_libs(self)
+ self.cpp_info.libs = ["pciaccess"]
self.cpp_info.set_property("pkg_config_name", "pciaccess")
|
{"golden_diff": "diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py\n--- a/recipes/libpciaccess/all/conanfile.py\n+++ b/recipes/libpciaccess/all/conanfile.py\n@@ -1,10 +1,10 @@\n import os\n-\n from conan.tools.gnu import Autotools, AutotoolsToolchain\n from conans import ConanFile, tools\n from conans.errors import ConanInvalidConfiguration\n \n-required_conan_version = \">=1.33.0\"\n+\n+required_conan_version = \">=1.44.0\"\n \n \n class LibPciAccessConan(ConanFile):\n@@ -19,8 +19,9 @@\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n \n- _source_subfolder = \"source_subfolder\"\n- _build_subfolder = \"build_subfolder\"\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n \n def validate(self):\n def is_supported(settings):\n@@ -55,7 +56,6 @@\n strip_root=True, destination=self._source_subfolder)\n \n def build(self):\n- # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n \n@@ -75,5 +75,5 @@\n self.package_folder, \"lib\"), \"*.la\")\n \n def package_info(self):\n- self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.libs = [\"pciaccess\"]\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "issue": "[package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0\n### Package and Environment Details\r\n\r\n* Package Name/Version: **libpciaccess/0.16**\r\n* Operating System+version: **Fedora 34 x86_64**\r\n* Compiler+version: **gcc 11.3.1**\r\n* Conan version: **conan 1.49.0**\r\n* Python version: **Python 3.9.13**\r\n\r\n\r\n### Conan profile\r\n\r\n```\r\nConfiguration for profile default:\r\n\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=11\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\nSimply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0.\r\n\r\nExpected result:\r\nlibpciaccess/0.16 is successfully built and made available in conan cache.\r\n\r\nActual result:\r\nBuild fails with the following error:\r\n\r\n```\r\nlibpciaccess/0.16: \r\nlibpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed\r\nlibpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646\r\nERROR: libpciaccess/0.16: Error in build() method, line 66\r\n\tautotools = Autotools(self, build_script_folder=self._source_subfolder)\r\n\tTypeError: __init__() got an unexpected keyword argument 'build_script_folder'\r\n\r\n```\r\nThe problem appears to be that the changes merged in #11021 which reverts the fix introduced for #10909 in #10910 hasn't been updated in the conan center. 
Possibly because it reverts to a previous version?\r\n\r\n### Logs\r\n\r\n_No response_\n", "before_files": [{"content": "import os\n\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LibPciAccessConan(ConanFile):\n name = \"libpciaccess\"\n description = \"Generic PCI access library\"\n topics = (\"pci\", \"xorg\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://gitlab.freedesktop.org/xorg/lib/libpciaccess\"\n license = \"MIT\", \"X11\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def validate(self):\n def is_supported(settings):\n if settings.os in (\"Linux\", \"FreeBSD\", \"SunOS\"):\n return True\n return settings.os == \"Windows\" and settings.get_safe(\"os.subsystem\") == \"cygwin\"\n if not is_supported(self.settings):\n raise ConanInvalidConfiguration(\"Unsupported architecture.\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n self.build_requires(\"xorg-macros/1.19.3\")\n self.build_requires(\"libtool/2.4.6\")\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.default_configure_install_args = True\n tc.generate()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self._source_subfolder)\n\n def build(self):\n # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.configure(build_script_folder=self._source_subfolder)\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\",\n src=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(os.path.join(\n self.package_folder, \"lib\"), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "path": "recipes/libpciaccess/all/conanfile.py"}]}
| 1,881 | 404 |
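For context on the libpciaccess record above: on Conan 1.49 `Autotools.__init__` no longer accepts `build_script_folder`, so the source folder has to be passed to `configure()` instead, which is exactly what the recipe's `build()` shown above already does. A minimal sketch of that call pattern (the `_source_subfolder` name is taken from the recipe; this is illustrative, not a full conanfile):

```python
# Sketch only: pass the source folder to configure(), not to the Autotools constructor.
from conan.tools.gnu import Autotools

def build(self):
    autotools = Autotools(self)  # no build_script_folder kwarg here on Conan 1.49
    autotools.configure(build_script_folder=self._source_subfolder)
    autotools.make()
```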
gh_patches_debug_18621
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmdetection3d-474
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bugs in configs in configs/benchmark/ folder
Thanks for your error report and we appreciate it a lot.
**Checklist**
1. I have searched related issues but cannot get the expected help.
2. The bug has not been fixed in the latest version.
**Describe the bug**
A clear and concise description of what the bug is.
**Reproduction**
1. What command or script did you run?
```
bash ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py 8 --no-validate
```
2. Did you make any modifications on the code or config? Did you understand what you have modified?
3. What dataset did you use?
Kitti
**Environment**
1. Please run `python mmdet3d/utils/collect_env.py` to collect the necessary environment information and paste it here.
2. You may add additional information that may be helpful for locating the problem, such as
- How you installed PyTorch [e.g., pip, conda, source]
- Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
**Error traceback**
If applicable, paste the error trackback here.
```
File "projects/mmcv/mmcv/utils/registry.py", line 54, in build_from_cfg
self.pipeline = Compose(pipeline)
File "anaconda3/envs/mmdet3d/lib/python3.7/site-packages/mmdet/datasets/pipelines/compose.py", line 22, in __init__
raise type(e)(f'{obj_cls.__name__}: {e}')
KeyError: "KittiDataset: 'GlobalRotScale is not in the pipeline registry'"
```
**Bug fix**
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
The argument names and some of the settings have not been updated in the benchmark folder.
Inconsistent interface between config and function
Dear developers:
I found there exists an inconsistent interface of "ObjectNoise" between "mmdet3d/datasets/pipelines/transforms_3d.py" and "configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py".
Specifically, I found that the options "loc_noise_std" and "rot_uniform_noise" mentioned in the config file do not appear in the corresponding function.
I think maybe it is a new feature in progress. I appreciate developers who maintain this repo and contribute to this issue.
</issue>
<code>
[start of configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py]
1 # model settings
2 voxel_size = [0.16, 0.16, 4]
3 point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
4 model = dict(
5 type='VoxelNet',
6 voxel_layer=dict(
7 max_num_points=64,
8 point_cloud_range=point_cloud_range,
9 voxel_size=voxel_size,
10 max_voxels=(12000, 20000)),
11 voxel_encoder=dict(
12 type='PillarFeatureNet',
13 in_channels=4,
14 feat_channels=[64],
15 with_distance=False,
16 voxel_size=voxel_size,
17 point_cloud_range=point_cloud_range),
18 middle_encoder=dict(
19 type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]),
20 backbone=dict(
21 type='SECOND',
22 in_channels=64,
23 layer_nums=[3, 5, 5],
24 layer_strides=[2, 2, 2],
25 out_channels=[64, 128, 256]),
26 neck=dict(
27 type='SECONDFPN',
28 in_channels=[64, 128, 256],
29 upsample_strides=[1, 2, 4],
30 out_channels=[128, 128, 128]),
31 bbox_head=dict(
32 type='Anchor3DHead',
33 num_classes=1,
34 in_channels=384,
35 feat_channels=384,
36 use_direction_classifier=True,
37 anchor_generator=dict(
38 type='Anchor3DRangeGenerator',
39 ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]],
40 sizes=[[1.6, 3.9, 1.56]],
41 rotations=[0, 1.57],
42 reshape_out=True),
43 diff_rad_by_sin=True,
44 bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
45 loss_cls=dict(
46 type='FocalLoss',
47 use_sigmoid=True,
48 gamma=2.0,
49 alpha=0.25,
50 loss_weight=1.0),
51 loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
52 loss_dir=dict(
53 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)),
54 # model training and testing settings
55 train_cfg=dict(
56 assigner=dict(
57 type='MaxIoUAssigner',
58 iou_calculator=dict(type='BboxOverlapsNearest3D'),
59 pos_iou_thr=0.6,
60 neg_iou_thr=0.45,
61 min_pos_iou=0.45,
62 ignore_iof_thr=-1),
63 allowed_border=0,
64 pos_weight=-1,
65 debug=False),
66 test_cfg=dict(
67 use_rotate_nms=True,
68 nms_across_levels=False,
69 nms_thr=0.01,
70 score_thr=0.1,
71 min_bbox_size=0,
72 nms_pre=100,
73 max_num=50))
74
75 # dataset settings
76 dataset_type = 'KittiDataset'
77 data_root = 'data/kitti/'
78 class_names = ['Car']
79 input_modality = dict(use_lidar=True, use_camera=False)
80 db_sampler = dict(
81 data_root=data_root,
82 info_path=data_root + 'kitti_dbinfos_train.pkl',
83 rate=1.0,
84 prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
85 sample_groups=dict(Car=15),
86 classes=class_names)
87
88 train_pipeline = [
89 dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
90 dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
91 dict(type='ObjectSample', db_sampler=db_sampler),
92 dict(
93 type='ObjectNoise',
94 num_try=100,
95 loc_noise_std=[0.25, 0.25, 0.25],
96 global_rot_range=[0.0, 0.0],
97 rot_uniform_noise=[-0.15707963267, 0.15707963267]),
98 dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
99 dict(
100 type='GlobalRotScale',
101 rot_uniform_noise=[-0.78539816, 0.78539816],
102 scaling_uniform_noise=[0.95, 1.05]),
103 dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
104 dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
105 dict(type='PointShuffle'),
106 dict(type='DefaultFormatBundle3D', class_names=class_names),
107 dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
108 ]
109 test_pipeline = [
110 dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
111 dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
112 dict(
113 type='DefaultFormatBundle3D',
114 class_names=class_names,
115 with_label=False),
116 dict(type='Collect3D', keys=['points'])
117 ]
118 # construct a pipeline for data and gt loading in show function
119 # please keep its loading function consistent with test_pipeline (e.g. client)
120 eval_pipeline = [
121 dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
122 dict(
123 type='DefaultFormatBundle3D',
124 class_names=class_names,
125 with_label=False),
126 dict(type='Collect3D', keys=['points'])
127 ]
128
129 data = dict(
130 samples_per_gpu=3,
131 workers_per_gpu=3,
132 train=dict(
133 type='RepeatDataset',
134 times=2,
135 dataset=dict(
136 type=dataset_type,
137 data_root=data_root,
138 ann_file=data_root + 'kitti_infos_train.pkl',
139 split='training',
140 pts_prefix='velodyne_reduced',
141 pipeline=train_pipeline,
142 modality=input_modality,
143 classes=class_names,
144 test_mode=False)),
145 val=dict(
146 type=dataset_type,
147 data_root=data_root,
148 ann_file=data_root + 'kitti_infos_val.pkl',
149 split='training',
150 pts_prefix='velodyne_reduced',
151 pipeline=test_pipeline,
152 modality=input_modality,
153 classes=class_names,
154 test_mode=True),
155 test=dict(
156 type=dataset_type,
157 data_root=data_root,
158 ann_file=data_root + 'kitti_infos_val.pkl',
159 split='training',
160 pts_prefix='velodyne_reduced',
161 pipeline=test_pipeline,
162 modality=input_modality,
163 classes=class_names,
164 test_mode=True))
165 # optimizer
166 lr = 0.001 # max learning rate
167 optimizer = dict(
168 type='AdamW',
169 lr=lr,
170 betas=(0.95, 0.99), # the momentum is change during training
171 weight_decay=0.01)
172 optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
173 # learning policy
174 lr_config = dict(
175 policy='cyclic',
176 target_ratio=(10, 1e-4),
177 cyclic_times=1,
178 step_ratio_up=0.4)
179 momentum_config = dict(
180 policy='cyclic',
181 target_ratio=(0.85 / 0.95, 1),
182 cyclic_times=1,
183 step_ratio_up=0.4)
184 checkpoint_config = dict(interval=1)
185 evaluation = dict(interval=1, pipeline=eval_pipeline)
186 # yapf:disable
187 log_config = dict(
188 interval=50,
189 hooks=[
190 dict(type='TextLoggerHook'),
191 dict(type='TensorboardLoggerHook')
192 ])
193 # yapf:enable
194 # runtime settings
195 runner = dict(type='EpochBasedRunner', max_epochs=50)
196 dist_params = dict(backend='nccl')
197 log_level = 'INFO'
198 work_dir = './work_dirs/pp_secfpn_100e'
199 load_from = None
200 resume_from = None
201 workflow = [('train', 50)]
202
[end of configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py
--- a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py
+++ b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py
@@ -92,14 +92,14 @@
dict(
type='ObjectNoise',
num_try=100,
- loc_noise_std=[0.25, 0.25, 0.25],
+ translation_std=[0.25, 0.25, 0.25],
global_rot_range=[0.0, 0.0],
- rot_uniform_noise=[-0.15707963267, 0.15707963267]),
+ rot_range=[-0.15707963267, 0.15707963267]),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
- type='GlobalRotScale',
- rot_uniform_noise=[-0.78539816, 0.78539816],
- scaling_uniform_noise=[0.95, 1.05]),
+ type='GlobalRotScaleTrans',
+ rot_range=[-0.78539816, 0.78539816],
+ scale_ratio_range=[0.95, 1.05]),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
|
{"golden_diff": "diff --git a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\n--- a/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\n+++ b/configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\n@@ -92,14 +92,14 @@\n dict(\n type='ObjectNoise',\n num_try=100,\n- loc_noise_std=[0.25, 0.25, 0.25],\n+ translation_std=[0.25, 0.25, 0.25],\n global_rot_range=[0.0, 0.0],\n- rot_uniform_noise=[-0.15707963267, 0.15707963267]),\n+ rot_range=[-0.15707963267, 0.15707963267]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n- type='GlobalRotScale',\n- rot_uniform_noise=[-0.78539816, 0.78539816],\n- scaling_uniform_noise=[0.95, 1.05]),\n+ type='GlobalRotScaleTrans',\n+ rot_range=[-0.78539816, 0.78539816],\n+ scale_ratio_range=[0.95, 1.05]),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n", "issue": "Bugs in configs in configs/benchmark/ folder\nThanks for your error report and we appreciate it a lot.\r\n\r\n**Checklist**\r\n1. I have searched related issues but cannot get the expected help.\r\n2. The bug has not been fixed in the latest version.\r\n\r\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**Reproduction**\r\n1. What command or script did you run?\r\n\r\n```\r\nbash ./tools/dist_train.sh configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py 8 --no-validate\r\n```\r\n\r\n2. Did you make any modifications on the code or config? Did you understand what you have modified?\r\n3. What dataset did you use?\r\nKitti\r\n\r\n**Environment**\r\n\r\n1. Please run `python mmdet3d/utils/collect_env.py` to collect necessary environment infomation and paste it here.\r\n\r\n\r\n\r\n2. You may add addition that may be helpful for locating the problem, such as\r\n - How you installed PyTorch [e.g., pip, conda, source]\r\n - Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)\r\n\r\n**Error traceback**\r\nIf applicable, paste the error trackback here.\r\n\r\n```\r\nFile \"projects/mmcv/mmcv/utils/registry.py\", line 54, in build_from_cfg\r\n self.pipeline = Compose(pipeline)\r\nFile \"anaconda3/envs/mmdet3d/lib/python3.7/site-packages/mmdet/datasets/pipelines/compose.py\", line 22, in __init__ \r\n raise type(e)(f'{obj_cls.__name__}: {e}')\r\nKeyError: \"KittiDataset: 'GlobalRotScale is not in the pipeline registry'\"\r\n```\r\n\r\n**Bug fix**\r\nIf you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!\r\n\r\nThe arguments' name and some type of settings are not updated in benchmark folder.\r\n\nInconsistency interface between config and function\nDear developers:\r\n\r\nI found there exists an inconsistent interface of \"ObjectNoise\" between \"mmdet3d/datasets/pipelines/transforms_3d.py\" and \"configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py\". \r\n\r\nEspecially, I found that the options \"loc_noise_std\" and \"rot_uniform_noise\" mentioned in the config file do not appear in the corresponding function.\r\n\r\nI think maybe it is a new feature in progress. 
I appreciate developers who maintain this repo and contribute to this issue.\n", "before_files": [{"content": "# model settings\nvoxel_size = [0.16, 0.16, 4]\npoint_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]\nmodel = dict(\n type='VoxelNet',\n voxel_layer=dict(\n max_num_points=64,\n point_cloud_range=point_cloud_range,\n voxel_size=voxel_size,\n max_voxels=(12000, 20000)),\n voxel_encoder=dict(\n type='PillarFeatureNet',\n in_channels=4,\n feat_channels=[64],\n with_distance=False,\n voxel_size=voxel_size,\n point_cloud_range=point_cloud_range),\n middle_encoder=dict(\n type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]),\n backbone=dict(\n type='SECOND',\n in_channels=64,\n layer_nums=[3, 5, 5],\n layer_strides=[2, 2, 2],\n out_channels=[64, 128, 256]),\n neck=dict(\n type='SECONDFPN',\n in_channels=[64, 128, 256],\n upsample_strides=[1, 2, 4],\n out_channels=[128, 128, 128]),\n bbox_head=dict(\n type='Anchor3DHead',\n num_classes=1,\n in_channels=384,\n feat_channels=384,\n use_direction_classifier=True,\n anchor_generator=dict(\n type='Anchor3DRangeGenerator',\n ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]],\n sizes=[[1.6, 3.9, 1.56]],\n rotations=[0, 1.57],\n reshape_out=True),\n diff_rad_by_sin=True,\n bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),\n loss_dir=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)),\n # model training and testing settings\n train_cfg=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n iou_calculator=dict(type='BboxOverlapsNearest3D'),\n pos_iou_thr=0.6,\n neg_iou_thr=0.45,\n min_pos_iou=0.45,\n ignore_iof_thr=-1),\n allowed_border=0,\n pos_weight=-1,\n debug=False),\n test_cfg=dict(\n use_rotate_nms=True,\n nms_across_levels=False,\n nms_thr=0.01,\n score_thr=0.1,\n min_bbox_size=0,\n nms_pre=100,\n max_num=50))\n\n# dataset settings\ndataset_type = 'KittiDataset'\ndata_root = 'data/kitti/'\nclass_names = ['Car']\ninput_modality = dict(use_lidar=True, use_camera=False)\ndb_sampler = dict(\n data_root=data_root,\n info_path=data_root + 'kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),\n sample_groups=dict(Car=15),\n classes=class_names)\n\ntrain_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),\n dict(type='ObjectSample', db_sampler=db_sampler),\n dict(\n type='ObjectNoise',\n num_try=100,\n loc_noise_std=[0.25, 0.25, 0.25],\n global_rot_range=[0.0, 0.0],\n rot_uniform_noise=[-0.15707963267, 0.15707963267]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n type='GlobalRotScale',\n rot_uniform_noise=[-0.78539816, 0.78539816],\n scaling_uniform_noise=[0.95, 1.05]),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n dict(type='PointShuffle'),\n dict(type='DefaultFormatBundle3D', class_names=class_names),\n dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n]\ntest_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', 
keys=['points'])\n]\n# construct a pipeline for data and gt loading in show function\n# please keep its loading function consistent with test_pipeline (e.g. client)\neval_pipeline = [\n dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n]\n\ndata = dict(\n samples_per_gpu=3,\n workers_per_gpu=3,\n train=dict(\n type='RepeatDataset',\n times=2,\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_train.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=train_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=False)),\n val=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True),\n test=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True))\n# optimizer\nlr = 0.001 # max learning rate\noptimizer = dict(\n type='AdamW',\n lr=lr,\n betas=(0.95, 0.99), # the momentum is change during training\n weight_decay=0.01)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='cyclic',\n target_ratio=(10, 1e-4),\n cyclic_times=1,\n step_ratio_up=0.4)\nmomentum_config = dict(\n policy='cyclic',\n target_ratio=(0.85 / 0.95, 1),\n cyclic_times=1,\n step_ratio_up=0.4)\ncheckpoint_config = dict(interval=1)\nevaluation = dict(interval=1, pipeline=eval_pipeline)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')\n ])\n# yapf:enable\n# runtime settings\nrunner = dict(type='EpochBasedRunner', max_epochs=50)\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/pp_secfpn_100e'\nload_from = None\nresume_from = None\nworkflow = [('train', 50)]\n", "path": "configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py"}]}
| 3,587 | 469 |
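To make the renames in the mmdetection3d patch above easier to scan, here is a minimal sketch of just the two affected pipeline entries, with the old keyword names noted in comments (the numeric values are simply the ones from this config, not recommendations):

```python
# Sketch of the two renamed transforms from the patch above; values copied from the config.
object_noise = dict(
    type='ObjectNoise',
    num_try=100,
    translation_std=[0.25, 0.25, 0.25],         # formerly loc_noise_std
    global_rot_range=[0.0, 0.0],
    rot_range=[-0.15707963267, 0.15707963267],  # formerly rot_uniform_noise
)
global_rot_scale_trans = dict(
    type='GlobalRotScaleTrans',                 # formerly GlobalRotScale
    rot_range=[-0.78539816, 0.78539816],        # formerly rot_uniform_noise
    scale_ratio_range=[0.95, 1.05],             # formerly scaling_uniform_noise
)
```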
gh_patches_debug_14824
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-190
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ignoreDefaultArgs` launch parameter is broken
I'm trying to disable default launch arguments however when this parameter is set to True one of two things happen:
```
browser: Browser = chromium.launch(ignoreDefaultArgs=True)
```
will break with:
```
File "/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py", line 66, in launch
normalize_launch_params(params)
File "/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py", line 140, in normalize_launch_params
params["env"] = {name: str(value) for [name, value] in params["env"].items()}
KeyError: 'env'
```
I've made a hotfix patch here https://github.com/Granitosaurus/playwright-python/commit/2dc0e107719097e1105e36149a7fe8238c210e38 and now the browser launches fine but times out on the `launch()` call:
```
Traceback (most recent call last):
File "/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py", line 68, in launch
return from_channel(await self._channel.send("launch", params))
File "/home/user/.local/lib/python3.8/site-packages/playwright/connection.py", line 39, in send
result = await callback.future
File "/usr/lib/python3.8/asyncio/futures.py", line 260, in __await__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.8/asyncio/tasks.py", line 349, in __wakeup
future.result()
File "/usr/lib/python3.8/asyncio/futures.py", line 178, in result
raise self._exception
playwright.helper.TimeoutError: Timeout 30000ms exceeded.
=========================== logs ===========================
<launching> /home/user/.cache/ms-playwright/chromium-799411/chrome-linux/chrome
<launched> pid=301672
[err] [301699:301699:0914/061714.839670:ERROR:sandbox_linux.cc(374)] InitializeSandbox() called with multiple threads in process gpu-process.
```
</issue>
<code>
[start of playwright/browser_type.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from pathlib import Path
16 from typing import Dict, List, Union
17
18 from playwright.browser import Browser
19 from playwright.browser_context import BrowserContext
20 from playwright.connection import ChannelOwner, from_channel
21 from playwright.helper import (
22 ColorScheme,
23 Credentials,
24 Env,
25 Geolocation,
26 IntSize,
27 ProxyServer,
28 locals_to_params,
29 not_installed_error,
30 )
31 from playwright.network import serialize_headers
32
33
34 class BrowserType(ChannelOwner):
35 def __init__(
36 self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
37 ) -> None:
38 super().__init__(parent, type, guid, initializer)
39
40 @property
41 def name(self) -> str:
42 return self._initializer["name"]
43
44 @property
45 def executablePath(self) -> str:
46 return self._initializer["executablePath"]
47
48 async def launch(
49 self,
50 executablePath: Union[str, Path] = None,
51 args: List[str] = None,
52 ignoreDefaultArgs: Union[bool, List[str]] = None,
53 handleSIGINT: bool = None,
54 handleSIGTERM: bool = None,
55 handleSIGHUP: bool = None,
56 timeout: int = None,
57 env: Env = None,
58 headless: bool = None,
59 devtools: bool = None,
60 proxy: ProxyServer = None,
61 downloadsPath: Union[str, Path] = None,
62 slowMo: int = None,
63 chromiumSandbox: bool = None,
64 ) -> Browser:
65 params = locals_to_params(locals())
66 normalize_launch_params(params)
67 try:
68 return from_channel(await self._channel.send("launch", params))
69 except Exception as e:
70 if f"{self.name}-" in str(e):
71 raise not_installed_error(f'"{self.name}" browser was not found.')
72 raise e
73
74 async def launchPersistentContext(
75 self,
76 userDataDir: Union[str, Path],
77 executablePath: Union[str, Path] = None,
78 args: List[str] = None,
79 ignoreDefaultArgs: Union[bool, List[str]] = None,
80 handleSIGINT: bool = None,
81 handleSIGTERM: bool = None,
82 handleSIGHUP: bool = None,
83 timeout: int = None,
84 env: Env = None,
85 headless: bool = None,
86 devtools: bool = None,
87 proxy: ProxyServer = None,
88 downloadsPath: Union[str, Path] = None,
89 slowMo: int = None,
90 viewport: IntSize = None,
91 ignoreHTTPSErrors: bool = None,
92 javaScriptEnabled: bool = None,
93 bypassCSP: bool = None,
94 userAgent: str = None,
95 locale: str = None,
96 timezoneId: str = None,
97 geolocation: Geolocation = None,
98 permissions: List[str] = None,
99 extraHTTPHeaders: Dict[str, str] = None,
100 offline: bool = None,
101 httpCredentials: Credentials = None,
102 deviceScaleFactor: int = None,
103 isMobile: bool = None,
104 hasTouch: bool = None,
105 colorScheme: ColorScheme = None,
106 acceptDownloads: bool = None,
107 chromiumSandbox: bool = None,
108 ) -> BrowserContext:
109 userDataDir = str(Path(userDataDir))
110 params = locals_to_params(locals())
111 if extraHTTPHeaders:
112 params["extraHTTPHeaders"] = serialize_headers(extraHTTPHeaders)
113 normalize_launch_params(params)
114 try:
115 return from_channel(
116 await self._channel.send("launchPersistentContext", params)
117 )
118 except Exception as e:
119 if f"{self.name}-" in str(e):
120 raise not_installed_error(f'"{self.name}" browser was not found.')
121 raise e
122
123 async def connect(
124 self, wsEndpoint: str, slowMo: int = None, timeout: int = None
125 ) -> Browser:
126 return from_channel(
127 await self._channel.send("connect", locals_to_params(locals()))
128 )
129
130
131 def normalize_launch_params(params: Dict) -> None:
132 if "env" in params:
133 params["env"] = {name: str(value) for [name, value] in params["env"].items()}
134 if "ignoreDefaultArgs" in params:
135 if isinstance(params["ignoreDefaultArgs"], bool):
136 params["ignoreAllDefaultArgs"] = True
137 del params["ignoreDefaultArgs"]
138 params["env"] = {name: str(value) for [name, value] in params["env"].items()}
139 if "executablePath" in params:
140 params["executablePath"] = str(Path(params["executablePath"]))
141 if "downloadsPath" in params:
142 params["downloadsPath"] = str(Path(params["downloadsPath"]))
143
[end of playwright/browser_type.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/playwright/browser_type.py b/playwright/browser_type.py
--- a/playwright/browser_type.py
+++ b/playwright/browser_type.py
@@ -132,10 +132,9 @@
if "env" in params:
params["env"] = {name: str(value) for [name, value] in params["env"].items()}
if "ignoreDefaultArgs" in params:
- if isinstance(params["ignoreDefaultArgs"], bool):
+ if params["ignoreDefaultArgs"] is True:
params["ignoreAllDefaultArgs"] = True
del params["ignoreDefaultArgs"]
- params["env"] = {name: str(value) for [name, value] in params["env"].items()}
if "executablePath" in params:
params["executablePath"] = str(Path(params["executablePath"]))
if "downloadsPath" in params:
|
{"golden_diff": "diff --git a/playwright/browser_type.py b/playwright/browser_type.py\n--- a/playwright/browser_type.py\n+++ b/playwright/browser_type.py\n@@ -132,10 +132,9 @@\n if \"env\" in params:\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"ignoreDefaultArgs\" in params:\n- if isinstance(params[\"ignoreDefaultArgs\"], bool):\n+ if params[\"ignoreDefaultArgs\"] is True:\n params[\"ignoreAllDefaultArgs\"] = True\n del params[\"ignoreDefaultArgs\"]\n- params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"executablePath\" in params:\n params[\"executablePath\"] = str(Path(params[\"executablePath\"]))\n if \"downloadsPath\" in params:\n", "issue": "`ignoreDefaultArgs` launch parameter is broken\nI'm trying to disable default launch arguments however when this parameter is set to True one of two things happen:\r\n\r\n```\r\nbrowser: Browser = chromium.launch(ignoreDefaultArgs=True)\r\n```\r\nwill break with:\r\n```\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py\", line 66, in launch\r\n normalize_launch_params(params)\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py\", line 140, in normalize_launch_params\r\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\r\nKeyError: 'env'\r\n``` \r\n\r\nI've made a hotfix patch here https://github.com/Granitosaurus/playwright-python/commit/2dc0e107719097e1105e36149a7fe8238c210e38 now the browser launches fine but timesout on `launch()` call:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/browser_type.py\", line 68, in launch\r\n return from_channel(await self._channel.send(\"launch\", params))\r\n File \"/home/user/.local/lib/python3.8/site-packages/playwright/connection.py\", line 39, in send\r\n result = await callback.future\r\n File \"/usr/lib/python3.8/asyncio/futures.py\", line 260, in __await__\r\n yield self # This tells Task to wait for completion.\r\n File \"/usr/lib/python3.8/asyncio/tasks.py\", line 349, in __wakeup\r\n future.result()\r\n File \"/usr/lib/python3.8/asyncio/futures.py\", line 178, in result\r\n raise self._exception\r\nplaywright.helper.TimeoutError: Timeout 30000ms exceeded.\r\n=========================== logs ===========================\r\n<launching> /home/user/.cache/ms-playwright/chromium-799411/chrome-linux/chrome \r\n<launched> pid=301672\r\n[err] [301699:301699:0914/061714.839670:ERROR:sandbox_linux.cc(374)] InitializeSandbox() called with multiple threads in process gpu-process.\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pathlib import Path\nfrom typing import Dict, List, Union\n\nfrom playwright.browser import Browser\nfrom playwright.browser_context import BrowserContext\nfrom playwright.connection import ChannelOwner, from_channel\nfrom playwright.helper import (\n 
ColorScheme,\n Credentials,\n Env,\n Geolocation,\n IntSize,\n ProxyServer,\n locals_to_params,\n not_installed_error,\n)\nfrom playwright.network import serialize_headers\n\n\nclass BrowserType(ChannelOwner):\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n\n @property\n def name(self) -> str:\n return self._initializer[\"name\"]\n\n @property\n def executablePath(self) -> str:\n return self._initializer[\"executablePath\"]\n\n async def launch(\n self,\n executablePath: Union[str, Path] = None,\n args: List[str] = None,\n ignoreDefaultArgs: Union[bool, List[str]] = None,\n handleSIGINT: bool = None,\n handleSIGTERM: bool = None,\n handleSIGHUP: bool = None,\n timeout: int = None,\n env: Env = None,\n headless: bool = None,\n devtools: bool = None,\n proxy: ProxyServer = None,\n downloadsPath: Union[str, Path] = None,\n slowMo: int = None,\n chromiumSandbox: bool = None,\n ) -> Browser:\n params = locals_to_params(locals())\n normalize_launch_params(params)\n try:\n return from_channel(await self._channel.send(\"launch\", params))\n except Exception as e:\n if f\"{self.name}-\" in str(e):\n raise not_installed_error(f'\"{self.name}\" browser was not found.')\n raise e\n\n async def launchPersistentContext(\n self,\n userDataDir: Union[str, Path],\n executablePath: Union[str, Path] = None,\n args: List[str] = None,\n ignoreDefaultArgs: Union[bool, List[str]] = None,\n handleSIGINT: bool = None,\n handleSIGTERM: bool = None,\n handleSIGHUP: bool = None,\n timeout: int = None,\n env: Env = None,\n headless: bool = None,\n devtools: bool = None,\n proxy: ProxyServer = None,\n downloadsPath: Union[str, Path] = None,\n slowMo: int = None,\n viewport: IntSize = None,\n ignoreHTTPSErrors: bool = None,\n javaScriptEnabled: bool = None,\n bypassCSP: bool = None,\n userAgent: str = None,\n locale: str = None,\n timezoneId: str = None,\n geolocation: Geolocation = None,\n permissions: List[str] = None,\n extraHTTPHeaders: Dict[str, str] = None,\n offline: bool = None,\n httpCredentials: Credentials = None,\n deviceScaleFactor: int = None,\n isMobile: bool = None,\n hasTouch: bool = None,\n colorScheme: ColorScheme = None,\n acceptDownloads: bool = None,\n chromiumSandbox: bool = None,\n ) -> BrowserContext:\n userDataDir = str(Path(userDataDir))\n params = locals_to_params(locals())\n if extraHTTPHeaders:\n params[\"extraHTTPHeaders\"] = serialize_headers(extraHTTPHeaders)\n normalize_launch_params(params)\n try:\n return from_channel(\n await self._channel.send(\"launchPersistentContext\", params)\n )\n except Exception as e:\n if f\"{self.name}-\" in str(e):\n raise not_installed_error(f'\"{self.name}\" browser was not found.')\n raise e\n\n async def connect(\n self, wsEndpoint: str, slowMo: int = None, timeout: int = None\n ) -> Browser:\n return from_channel(\n await self._channel.send(\"connect\", locals_to_params(locals()))\n )\n\n\ndef normalize_launch_params(params: Dict) -> None:\n if \"env\" in params:\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"ignoreDefaultArgs\" in params:\n if isinstance(params[\"ignoreDefaultArgs\"], bool):\n params[\"ignoreAllDefaultArgs\"] = True\n del params[\"ignoreDefaultArgs\"]\n params[\"env\"] = {name: str(value) for [name, value] in params[\"env\"].items()}\n if \"executablePath\" in params:\n params[\"executablePath\"] = str(Path(params[\"executablePath\"]))\n if \"downloadsPath\" in params:\n 
params[\"downloadsPath\"] = str(Path(params[\"downloadsPath\"]))\n", "path": "playwright/browser_type.py"}]}
| 2,533 | 191 |
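The root cause addressed by the playwright patch above is twofold: the `env` re-mapping line was duplicated inside the `ignoreDefaultArgs` branch (raising `KeyError: 'env'` when no `env` was passed), and the `isinstance(..., bool)` check also treated `ignoreDefaultArgs=False` as "ignore everything". A minimal sketch of the corrected helper, limited to the portion the patch touches:

```python
# Sketch of normalize_launch_params after the fix shown above.
def normalize_launch_params(params):
    if "env" in params:
        params["env"] = {name: str(value) for name, value in params["env"].items()}
    if "ignoreDefaultArgs" in params:
        if params["ignoreDefaultArgs"] is True:  # only True means "drop all default args"
            params["ignoreAllDefaultArgs"] = True
            del params["ignoreDefaultArgs"]
        # no unconditional params["env"] access here anymore
```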
gh_patches_debug_2041
|
rasdani/github-patches
|
git_diff
|
Pyomo__pyomo-2633
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixed Vars unpickle as stale
## Summary
I'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale.
### Steps to reproduce the issue
```
from pyomo.environ import *
import pickle
m = ConcreteModel()
m.x = Var(domain=Binary)
m.x.fix(1)
unpickle = pickle.loads(pickle.dumps(m))
m.x.pprint()
unpickle.x.pprint()
```
```
x : Size=1, Index=None
Key : Lower : Value : Upper : Fixed : Stale : Domain
None : 0 : 1 : 1 : True : False : Binary
x : Size=1, Index=None
Key : Lower : Value : Upper : Fixed : Stale : Domain
None : 0 : 1 : 1 : True : True : Binary
```
### Error Message
It seems like these models should be identical, even up to stale-ness, right?
### Information on your system
Pyomo version: main
Python version: 3.8
Operating system: linux
How Pyomo was installed (PyPI, conda, source): source
Solver (if applicable):
</issue>
<code>
[start of pyomo/core/staleflag.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright (c) 2008-2022
5 # National Technology and Engineering Solutions of Sandia, LLC
6 # Under the terms of Contract DE-NA0003525 with National Technology and
7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
8 # rights in this software.
9 # This software is distributed under the 3-clause BSD License.
10 # ___________________________________________________________________________
11
12 class _StaleFlagManager(object):
13 def __init__(self):
14 self._current = 0
15 self.mark_all_as_stale()
16
17 def stale_mapper(self, encode, value):
18 if encode:
19 return self.is_stale(value)
20 else:
21 if value:
22 return 0
23 else:
24 self.get_flag(0)
25
26 def _get_flag(self, current_flag):
27 """Return the current global stale flag value"""
28 return self._current
29
30 def _get_flag_delayed(self, current_flag):
31 """Implement the "delayed" advancement of the global stale flag value
32
33 This will continue to return the current value of the state flag
34 until the first non-stale variable is updated (that it, it is
35 passed the current stale flag when called). This allows for
36 updating stale variable values without incrementing the global
37 stale flag, but will mark everything as stale as soon as a
38 non-stale variable value is changed.
39
40 """
41 if current_flag == self._current:
42 self._current += 1
43 setattr(self, 'get_flag', getattr(self, '_get_flag'))
44 return self._current
45
46 def is_stale(self, val):
47 """Return ``True`` if the passed value indicated a stale variable"""
48 return val != self._current
49
50 def mark_all_as_stale(self, delayed=False):
51 """Advance the global stale flag, marking all variables as stale
52
53 This is generally called immediately before and after a batch
54 variable update (i.e. loading values from a solver result or
55 stored solution). Before the batch update
56 :meth:`mark_all_as_stale` is called with ``delayed=False``,
57 which immediately marks all variables as stale. After the batch
58 update, :meth:`mark_all_as_stale` is typically called with
59 ``delayed=True``. This allows additional stale variables to be
60 updated without advancing the global flag, but as soon as any
61 non-stale variable has its value changed, then the flag is
62 advanced and all other variables become stale.
63
64 """
65 if delayed:
66 setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))
67 else:
68 setattr(self, 'get_flag', getattr(self, '_get_flag'))
69 self._current += 1
70
71 StaleFlagManager = _StaleFlagManager()
72
[end of pyomo/core/staleflag.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py
--- a/pyomo/core/staleflag.py
+++ b/pyomo/core/staleflag.py
@@ -21,7 +21,7 @@
if value:
return 0
else:
- self.get_flag(0)
+ return self.get_flag(0)
def _get_flag(self, current_flag):
"""Return the current global stale flag value"""
|
{"golden_diff": "diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py\n--- a/pyomo/core/staleflag.py\n+++ b/pyomo/core/staleflag.py\n@@ -21,7 +21,7 @@\n if value:\n return 0\n else:\n- self.get_flag(0)\n+ return self.get_flag(0)\n \n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n", "issue": "Fixed Vars unpickle as stale\n## Summary\r\n\r\nI'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale.\r\n\r\n### Steps to reproduce the issue\r\n\r\n```\r\nfrom pyomo.environ import *\r\nimport pickle\r\n\r\nm = ConcreteModel()\r\nm.x = Var(domain=Binary)\r\nm.x.fix(1)\r\n\r\nunpickle = pickle.loads(pickle.dumps(m))\r\n\r\nm.x.pprint()\r\nunpickle.x.pprint()\r\n```\r\n\r\n```\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : False : Binary\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : True : Binary\r\n```\r\n\r\n### Error Message\r\n\r\nIt seems like these models should be identical, even up to stale-ness, right?\r\n\r\n### Information on your system\r\n\r\nPyomo version: main\r\nPython version: 3.8\r\nOperating system: linux\r\nHow Pyomo was installed (PyPI, conda, source): source \r\nSolver (if applicable):\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. 
After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py"}]}
| 1,592 | 102 |
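The one-line Pyomo patch above explains the symptom in the issue: the decode branch of `stale_mapper` computed a fresh flag but never returned it, so the restored attribute became `None`, which never equals the current global flag and therefore reports as stale after unpickling. A minimal sketch of the corrected method for reference:

```python
# Sketch of stale_mapper after the fix: the decode path must return the flag value.
def stale_mapper(self, encode, value):
    if encode:
        return self.is_stale(value)    # pickle a boolean "was stale" marker
    else:
        if value:                      # was stale when pickled: 0 never matches the advanced flag
            return 0
        else:                          # was not stale: adopt the current global flag
            return self.get_flag(0)    # the missing "return" was the bug
```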
gh_patches_debug_40929
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-449
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problem with Popen arguments passed as map
Greetings!
I am having an issue when using the package `sorl.thumbnail` on a Django project. That package produces thumbnails from images using system-installed programs (called engines).
When `sorl.thumbnail` makes a `Popen` call here:
https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/convert_engine.py#L57
The arguments that are received in your patched `Popen` are not being interpreted correctly. This is the suspected source of the problem:
https://github.com/getsentry/sentry-python/blob/2c0a2ea96882adf69742bd7e79e588564ff5b85c/sentry_sdk/integrations/stdlib.py#L143
Arguments are handled in such a way that the original `Popen` crashes when trying to access `args[0]`, as the `args` parameter is interpreted as (or actually becomes) an empty list.
In my opinion, the problem arises because in `sorl.thumbnail` the arguments to `Popen` are passed through a `map` call first, so that a `map` object is what gets passed as the arguments. I am not sure what effect a `map` object has when it is received in the `*a` of the patched `Popen`.
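As a minimal illustration of the Python behaviour being described (hypothetical values, not taken from `sorl.thumbnail`): a `map` object is a one-shot iterator, so if the instrumentation iterates it before handing it to the real `Popen.__init__`, nothing is left for `subprocess` to read.

```
args = map(str, ['convert', 'in.jpg', 'out.jpg'])
list(args)   # first consumer sees all three values
list(args)   # a second consumer gets [] -> args[0] then raises IndexError
```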
Here's the relevant part of the traceback I am getting:
```
File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py", line 131, in get_thumbnail
thumbnail)
File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py", line 164, in _create_thumbnail
default.engine.write(image, options, thumbnail)
File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/engines/convert_engine.py", line 57, in write
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
File "/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sentry_sdk/integrations/stdlib.py", line 165, in sentry_patched_popen_init
return old_popen_init(self, *a, **kw)
File "/usr/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.6/subprocess.py", line 1248, in _execute_child
executable = args[0]
IndexError: list index out of range
```
If I remove the Sentry integration altogether from the Django project, `sorl.thumbnail` is able to effectively call `Popen` with the "mapped" arguments, and everything works.
Thanks in advance!
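For context, a minimal sketch of the iterator behaviour being described — this snippet is illustrative only (it is not part of the original report) and assumes nothing beyond standard CPython 3 semantics:

```python
args = map(str, ['echo', 'hello'])   # a one-shot iterator, not a list

description = " ".join(args)         # first (and only) full iteration
print(description)                   # 'echo hello'
print(list(args))                    # [] -- the iterator is now exhausted

# subprocess.Popen(args)             # at this point Popen would receive an
                                     # empty sequence and fail on args[0]
```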
</issue>
<code>
[start of sentry_sdk/integrations/stdlib.py]
1 import os
2 import subprocess
3 import sys
4 import platform
5
6 from sentry_sdk.hub import Hub
7 from sentry_sdk.integrations import Integration
8 from sentry_sdk.scope import add_global_event_processor
9 from sentry_sdk.tracing import EnvironHeaders, record_http_request
10
11 try:
12 from httplib import HTTPConnection # type: ignore
13 except ImportError:
14 from http.client import HTTPConnection
15
16 _RUNTIME_CONTEXT = {
17 "name": platform.python_implementation(),
18 "version": "%s.%s.%s" % (sys.version_info[:3]),
19 "build": sys.version,
20 }
21
22
23 class StdlibIntegration(Integration):
24 identifier = "stdlib"
25
26 @staticmethod
27 def setup_once():
28 # type: () -> None
29 _install_httplib()
30 _install_subprocess()
31
32 @add_global_event_processor
33 def add_python_runtime_context(event, hint):
34 if Hub.current.get_integration(StdlibIntegration) is not None:
35 contexts = event.setdefault("contexts", {})
36 if isinstance(contexts, dict) and "runtime" not in contexts:
37 contexts["runtime"] = _RUNTIME_CONTEXT
38
39 return event
40
41
42 def _install_httplib():
43 # type: () -> None
44 real_putrequest = HTTPConnection.putrequest
45 real_getresponse = HTTPConnection.getresponse
46
47 def putrequest(self, method, url, *args, **kwargs):
48 hub = Hub.current
49 if hub.get_integration(StdlibIntegration) is None:
50 return real_putrequest(self, method, url, *args, **kwargs)
51
52 host = self.host
53 port = self.port
54 default_port = self.default_port
55
56 real_url = url
57 if not real_url.startswith(("http://", "https://")):
58 real_url = "%s://%s%s%s" % (
59 default_port == 443 and "https" or "http",
60 host,
61 port != default_port and ":%s" % port or "",
62 url,
63 )
64
65 recorder = record_http_request(hub, real_url, method)
66 data_dict = recorder.__enter__()
67
68 try:
69 rv = real_putrequest(self, method, url, *args, **kwargs)
70
71 for key, value in hub.iter_trace_propagation_headers():
72 self.putheader(key, value)
73 except Exception:
74 recorder.__exit__(*sys.exc_info())
75 raise
76
77 self._sentrysdk_recorder = recorder
78 self._sentrysdk_data_dict = data_dict
79
80 return rv
81
82 def getresponse(self, *args, **kwargs):
83 recorder = getattr(self, "_sentrysdk_recorder", None)
84
85 if recorder is None:
86 return real_getresponse(self, *args, **kwargs)
87
88 data_dict = getattr(self, "_sentrysdk_data_dict", None)
89
90 try:
91 rv = real_getresponse(self, *args, **kwargs)
92
93 if data_dict is not None:
94 data_dict["httplib_response"] = rv
95 data_dict["status_code"] = rv.status
96 data_dict["reason"] = rv.reason
97 except TypeError:
98 # python-requests provokes a typeerror to discover py3 vs py2 differences
99 #
100 # > TypeError("getresponse() got an unexpected keyword argument 'buffering'")
101 raise
102 except Exception:
103 recorder.__exit__(*sys.exc_info())
104 raise
105 else:
106 recorder.__exit__(None, None, None)
107
108 return rv
109
110 HTTPConnection.putrequest = putrequest
111 HTTPConnection.getresponse = getresponse
112
113
114 def _init_argument(args, kwargs, name, position, setdefault_callback=None):
115 """
116 given (*args, **kwargs) of a function call, retrieve (and optionally set a
117 default for) an argument by either name or position.
118
119 This is useful for wrapping functions with complex type signatures and
120 extracting a few arguments without needing to redefine that function's
121 entire type signature.
122 """
123
124 if name in kwargs:
125 rv = kwargs[name]
126 if rv is None and setdefault_callback is not None:
127 rv = kwargs[name] = setdefault_callback()
128 elif position < len(args):
129 rv = args[position]
130 if rv is None and setdefault_callback is not None:
131 rv = args[position] = setdefault_callback()
132 else:
133 rv = setdefault_callback and setdefault_callback()
134 if rv is not None:
135 kwargs[name] = rv
136
137 return rv
138
139
140 def _install_subprocess():
141 old_popen_init = subprocess.Popen.__init__
142
143 def sentry_patched_popen_init(self, *a, **kw):
144 hub = Hub.current
145 if hub.get_integration(StdlibIntegration) is None:
146 return old_popen_init(self, *a, **kw)
147
148 # do not setdefault! args is required by Popen, doing setdefault would
149 # make invalid calls valid
150 args = _init_argument(a, kw, "args", 0) or []
151 cwd = _init_argument(a, kw, "cwd", 10)
152
153 env = None
154
155 for k, v in hub.iter_trace_propagation_headers():
156 if env is None:
157 env = _init_argument(a, kw, "env", 11, lambda: dict(os.environ))
158 env["SUBPROCESS_" + k.upper().replace("-", "_")] = v
159
160 with hub.span(op="subprocess", description=" ".join(map(str, args))) as span:
161 span.set_tag("subprocess.cwd", cwd)
162
163 return old_popen_init(self, *a, **kw)
164
165 subprocess.Popen.__init__ = sentry_patched_popen_init # type: ignore
166
167
168 def get_subprocess_traceparent_headers():
169 return EnvironHeaders(os.environ, prefix="SUBPROCESS_")
170
[end of sentry_sdk/integrations/stdlib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/stdlib.py b/sentry_sdk/integrations/stdlib.py
--- a/sentry_sdk/integrations/stdlib.py
+++ b/sentry_sdk/integrations/stdlib.py
@@ -7,12 +7,15 @@
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
from sentry_sdk.tracing import EnvironHeaders, record_http_request
+from sentry_sdk.utils import capture_internal_exceptions, safe_repr
+
try:
from httplib import HTTPConnection # type: ignore
except ImportError:
from http.client import HTTPConnection
+
_RUNTIME_CONTEXT = {
"name": platform.python_implementation(),
"version": "%s.%s.%s" % (sys.version_info[:3]),
@@ -123,14 +126,18 @@
if name in kwargs:
rv = kwargs[name]
- if rv is None and setdefault_callback is not None:
- rv = kwargs[name] = setdefault_callback()
+ if setdefault_callback is not None:
+ rv = setdefault_callback(rv)
+ if rv is not None:
+ kwargs[name] = rv
elif position < len(args):
rv = args[position]
- if rv is None and setdefault_callback is not None:
- rv = args[position] = setdefault_callback()
+ if setdefault_callback is not None:
+ rv = setdefault_callback(rv)
+ if rv is not None:
+ args[position] = rv
else:
- rv = setdefault_callback and setdefault_callback()
+ rv = setdefault_callback and setdefault_callback(None)
if rv is not None:
kwargs[name] = rv
@@ -145,20 +152,37 @@
if hub.get_integration(StdlibIntegration) is None:
return old_popen_init(self, *a, **kw)
- # do not setdefault! args is required by Popen, doing setdefault would
- # make invalid calls valid
+ # Convert from tuple to list to be able to set values.
+ a = list(a)
+
args = _init_argument(a, kw, "args", 0) or []
- cwd = _init_argument(a, kw, "cwd", 10)
+ cwd = _init_argument(a, kw, "cwd", 9)
+
+ # if args is not a list or tuple (and e.g. some iterator instead),
+ # let's not use it at all. There are too many things that can go wrong
+ # when trying to collect an iterator into a list and setting that list
+ # into `a` again.
+ #
+ # Also invocations where `args` is not a sequence are not actually
+ # legal. They just happen to work under CPython.
+ description = None
+
+ if isinstance(args, (list, tuple)) and len(args) < 100:
+ with capture_internal_exceptions():
+ description = " ".join(map(str, args))
+
+ if description is None:
+ description = safe_repr(args)
env = None
for k, v in hub.iter_trace_propagation_headers():
if env is None:
- env = _init_argument(a, kw, "env", 11, lambda: dict(os.environ))
+ env = _init_argument(a, kw, "env", 10, lambda x: dict(x or os.environ))
env["SUBPROCESS_" + k.upper().replace("-", "_")] = v
- with hub.span(op="subprocess", description=" ".join(map(str, args))) as span:
- span.set_tag("subprocess.cwd", cwd)
+ with hub.span(op="subprocess", description=description) as span:
+ span.set_data("subprocess.cwd", cwd)
return old_popen_init(self, *a, **kw)
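A condensed, standalone sketch of the guarded description logic above — illustrative only; `reprlib.repr` stands in here for `sentry_sdk.utils.safe_repr`, which is not reproduced:

```python
import reprlib

def describe_args(args):
    # Only join real, reasonably small sequences; anything else (e.g. a map
    # object or another iterator) gets a bounded repr instead, so the
    # original object is never consumed before it reaches Popen.
    if isinstance(args, (list, tuple)) and len(args) < 100:
        try:
            return " ".join(map(str, args))
        except Exception:
            pass
    return reprlib.repr(args)

print(describe_args(["convert", "in.png", "out.jpg"]))  # 'convert in.png out.jpg'
print(describe_args(map(str, ["convert", "in.png"])))   # '<map object at 0x...>'
```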
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/stdlib.py b/sentry_sdk/integrations/stdlib.py\n--- a/sentry_sdk/integrations/stdlib.py\n+++ b/sentry_sdk/integrations/stdlib.py\n@@ -7,12 +7,15 @@\n from sentry_sdk.integrations import Integration\n from sentry_sdk.scope import add_global_event_processor\n from sentry_sdk.tracing import EnvironHeaders, record_http_request\n+from sentry_sdk.utils import capture_internal_exceptions, safe_repr\n+\n \n try:\n from httplib import HTTPConnection # type: ignore\n except ImportError:\n from http.client import HTTPConnection\n \n+\n _RUNTIME_CONTEXT = {\n \"name\": platform.python_implementation(),\n \"version\": \"%s.%s.%s\" % (sys.version_info[:3]),\n@@ -123,14 +126,18 @@\n \n if name in kwargs:\n rv = kwargs[name]\n- if rv is None and setdefault_callback is not None:\n- rv = kwargs[name] = setdefault_callback()\n+ if setdefault_callback is not None:\n+ rv = setdefault_callback(rv)\n+ if rv is not None:\n+ kwargs[name] = rv\n elif position < len(args):\n rv = args[position]\n- if rv is None and setdefault_callback is not None:\n- rv = args[position] = setdefault_callback()\n+ if setdefault_callback is not None:\n+ rv = setdefault_callback(rv)\n+ if rv is not None:\n+ args[position] = rv\n else:\n- rv = setdefault_callback and setdefault_callback()\n+ rv = setdefault_callback and setdefault_callback(None)\n if rv is not None:\n kwargs[name] = rv\n \n@@ -145,20 +152,37 @@\n if hub.get_integration(StdlibIntegration) is None:\n return old_popen_init(self, *a, **kw)\n \n- # do not setdefault! args is required by Popen, doing setdefault would\n- # make invalid calls valid\n+ # Convert from tuple to list to be able to set values.\n+ a = list(a)\n+\n args = _init_argument(a, kw, \"args\", 0) or []\n- cwd = _init_argument(a, kw, \"cwd\", 10)\n+ cwd = _init_argument(a, kw, \"cwd\", 9)\n+\n+ # if args is not a list or tuple (and e.g. some iterator instead),\n+ # let's not use it at all. There are too many things that can go wrong\n+ # when trying to collect an iterator into a list and setting that list\n+ # into `a` again.\n+ #\n+ # Also invocations where `args` is not a sequence are not actually\n+ # legal. They just happen to work under CPython.\n+ description = None\n+\n+ if isinstance(args, (list, tuple)) and len(args) < 100:\n+ with capture_internal_exceptions():\n+ description = \" \".join(map(str, args))\n+\n+ if description is None:\n+ description = safe_repr(args)\n \n env = None\n \n for k, v in hub.iter_trace_propagation_headers():\n if env is None:\n- env = _init_argument(a, kw, \"env\", 11, lambda: dict(os.environ))\n+ env = _init_argument(a, kw, \"env\", 10, lambda x: dict(x or os.environ))\n env[\"SUBPROCESS_\" + k.upper().replace(\"-\", \"_\")] = v\n \n- with hub.span(op=\"subprocess\", description=\" \".join(map(str, args))) as span:\n- span.set_tag(\"subprocess.cwd\", cwd)\n+ with hub.span(op=\"subprocess\", description=description) as span:\n+ span.set_data(\"subprocess.cwd\", cwd)\n \n return old_popen_init(self, *a, **kw)\n", "issue": "Problem with Popen arguments passed as map\nGreetings!\r\n\r\nI am having an issue when using the package `sorl.thumbnail` on a Django project. 
That package produces thumbnails from images using system installed programs (called engines.)\r\n\r\nWhen `sorl.thumbnail` makes a `Popen` call here:\r\n\r\nhttps://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/engines/convert_engine.py#L57\r\n\r\nThe arguments that are received in your patched `Popen` are not being interpreted correctly. This is the suspect of the problem:\r\n\r\nhttps://github.com/getsentry/sentry-python/blob/2c0a2ea96882adf69742bd7e79e588564ff5b85c/sentry_sdk/integrations/stdlib.py#L143\r\n\r\nArguments are handled in such a way that the original `popen` crashes when trying to access `args[0]`, as the `args` parameter is interpreted (or actually becomes) an empty list.\r\n\r\nIn my opinion, the problem arises because in `sorl.thumbnail` the arguments to `Popen` are passed to a `map` function first, so that a `map` object is what gets passed as arguments. I am not sure what's the effect of a `map` object being received in your `*a` of the patched `Popen`.\r\n\r\nHere's the relevant part of the traceback I am getting:\r\n\r\n```\r\nFile \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py\", line 131, in get_thumbnail\r\n thumbnail)\r\n File \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/base.py\", line 164, in _create_thumbnail\r\n default.engine.write(image, options, thumbnail)\r\n File \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sorl/thumbnail/engines/convert_engine.py\", line 57, in write\r\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n File \"/mnt/d/Proyectos/ginseng/.venv/lib/python3.6/site-packages/sentry_sdk/integrations/stdlib.py\", line 165, in sentry_patched_popen_init\r\n return old_popen_init(self, *a, **kw)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 729, in __init__\r\n restore_signals, start_new_session)\r\n File \"/usr/lib/python3.6/subprocess.py\", line 1248, in _execute_child\r\n executable = args[0]\r\nIndexError: list index out of range\r\n```\r\n\r\nIf I remove the Sentry integration altogether from the Django project, `sorl.thumbnail` is able to effectively call `Popen` with the \"mapped\" arguments, and everything works.\r\n\r\nThanks in advance!\n", "before_files": [{"content": "import os\nimport subprocess\nimport sys\nimport platform\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.scope import add_global_event_processor\nfrom sentry_sdk.tracing import EnvironHeaders, record_http_request\n\ntry:\n from httplib import HTTPConnection # type: ignore\nexcept ImportError:\n from http.client import HTTPConnection\n\n_RUNTIME_CONTEXT = {\n \"name\": platform.python_implementation(),\n \"version\": \"%s.%s.%s\" % (sys.version_info[:3]),\n \"build\": sys.version,\n}\n\n\nclass StdlibIntegration(Integration):\n identifier = \"stdlib\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n _install_httplib()\n _install_subprocess()\n\n @add_global_event_processor\n def add_python_runtime_context(event, hint):\n if Hub.current.get_integration(StdlibIntegration) is not None:\n contexts = event.setdefault(\"contexts\", {})\n if isinstance(contexts, dict) and \"runtime\" not in contexts:\n contexts[\"runtime\"] = _RUNTIME_CONTEXT\n\n return event\n\n\ndef _install_httplib():\n # type: () -> None\n real_putrequest = HTTPConnection.putrequest\n real_getresponse = HTTPConnection.getresponse\n\n def putrequest(self, method, url, *args, 
**kwargs):\n hub = Hub.current\n if hub.get_integration(StdlibIntegration) is None:\n return real_putrequest(self, method, url, *args, **kwargs)\n\n host = self.host\n port = self.port\n default_port = self.default_port\n\n real_url = url\n if not real_url.startswith((\"http://\", \"https://\")):\n real_url = \"%s://%s%s%s\" % (\n default_port == 443 and \"https\" or \"http\",\n host,\n port != default_port and \":%s\" % port or \"\",\n url,\n )\n\n recorder = record_http_request(hub, real_url, method)\n data_dict = recorder.__enter__()\n\n try:\n rv = real_putrequest(self, method, url, *args, **kwargs)\n\n for key, value in hub.iter_trace_propagation_headers():\n self.putheader(key, value)\n except Exception:\n recorder.__exit__(*sys.exc_info())\n raise\n\n self._sentrysdk_recorder = recorder\n self._sentrysdk_data_dict = data_dict\n\n return rv\n\n def getresponse(self, *args, **kwargs):\n recorder = getattr(self, \"_sentrysdk_recorder\", None)\n\n if recorder is None:\n return real_getresponse(self, *args, **kwargs)\n\n data_dict = getattr(self, \"_sentrysdk_data_dict\", None)\n\n try:\n rv = real_getresponse(self, *args, **kwargs)\n\n if data_dict is not None:\n data_dict[\"httplib_response\"] = rv\n data_dict[\"status_code\"] = rv.status\n data_dict[\"reason\"] = rv.reason\n except TypeError:\n # python-requests provokes a typeerror to discover py3 vs py2 differences\n #\n # > TypeError(\"getresponse() got an unexpected keyword argument 'buffering'\")\n raise\n except Exception:\n recorder.__exit__(*sys.exc_info())\n raise\n else:\n recorder.__exit__(None, None, None)\n\n return rv\n\n HTTPConnection.putrequest = putrequest\n HTTPConnection.getresponse = getresponse\n\n\ndef _init_argument(args, kwargs, name, position, setdefault_callback=None):\n \"\"\"\n given (*args, **kwargs) of a function call, retrieve (and optionally set a\n default for) an argument by either name or position.\n\n This is useful for wrapping functions with complex type signatures and\n extracting a few arguments without needing to redefine that function's\n entire type signature.\n \"\"\"\n\n if name in kwargs:\n rv = kwargs[name]\n if rv is None and setdefault_callback is not None:\n rv = kwargs[name] = setdefault_callback()\n elif position < len(args):\n rv = args[position]\n if rv is None and setdefault_callback is not None:\n rv = args[position] = setdefault_callback()\n else:\n rv = setdefault_callback and setdefault_callback()\n if rv is not None:\n kwargs[name] = rv\n\n return rv\n\n\ndef _install_subprocess():\n old_popen_init = subprocess.Popen.__init__\n\n def sentry_patched_popen_init(self, *a, **kw):\n hub = Hub.current\n if hub.get_integration(StdlibIntegration) is None:\n return old_popen_init(self, *a, **kw)\n\n # do not setdefault! 
args is required by Popen, doing setdefault would\n # make invalid calls valid\n args = _init_argument(a, kw, \"args\", 0) or []\n cwd = _init_argument(a, kw, \"cwd\", 10)\n\n env = None\n\n for k, v in hub.iter_trace_propagation_headers():\n if env is None:\n env = _init_argument(a, kw, \"env\", 11, lambda: dict(os.environ))\n env[\"SUBPROCESS_\" + k.upper().replace(\"-\", \"_\")] = v\n\n with hub.span(op=\"subprocess\", description=\" \".join(map(str, args))) as span:\n span.set_tag(\"subprocess.cwd\", cwd)\n\n return old_popen_init(self, *a, **kw)\n\n subprocess.Popen.__init__ = sentry_patched_popen_init # type: ignore\n\n\ndef get_subprocess_traceparent_headers():\n return EnvironHeaders(os.environ, prefix=\"SUBPROCESS_\")\n", "path": "sentry_sdk/integrations/stdlib.py"}]}
| 2,866 | 864 |
gh_patches_debug_33845 | rasdani/github-patches | git_diff | great-expectations__great_expectations-6677 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use a cleaner solution for non-truncating division in Python 2
Prefer `from __future__ import division` to `1.*x/y`
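A minimal sketch of the suggested approach (illustrative, not taken from the codebase):

```python
# Python 2 sketch: with the future import, "/" performs true division,
# so the 1.*x/y idiom becomes unnecessary.
from __future__ import division

x, y = 3, 2
print(x / y)    # 1.5  (true division, matching Python 3)
print(x // y)   # 1    (use floor division where truncation is intended)
```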
</issue>
<code>
[start of great_expectations/rule_based_profiler/helpers/cardinality_checker.py]
1 import abc
2 import enum
3 from dataclasses import dataclass
4 from typing import Optional, Tuple, Union
5
6 from great_expectations.core.util import convert_to_json_serializable
7 from great_expectations.exceptions import ProfilerConfigurationError
8 from great_expectations.types import SerializableDictDot
9
10
11 @dataclass(frozen=True)
12 class CardinalityLimit(abc.ABC, SerializableDictDot):
13 name: str
14
15
16 @dataclass(frozen=True)
17 class RelativeCardinalityLimit(CardinalityLimit):
18 max_proportion_unique: float
19 metric_name_defining_limit: str = "column.unique_proportion"
20
21 def to_json_dict(self) -> dict:
22 return convert_to_json_serializable(
23 {
24 "name": self.name,
25 "max_proportion_unique": self.max_proportion_unique,
26 "metric_name_defining_limit": self.metric_name_defining_limit,
27 }
28 )
29
30
31 @dataclass(frozen=True)
32 class AbsoluteCardinalityLimit(CardinalityLimit):
33 max_unique_values: int
34 metric_name_defining_limit: str = "column.distinct_values.count"
35
36 def to_json_dict(self) -> dict:
37 return convert_to_json_serializable(
38 {
39 "name": self.name,
40 "max_proportion_unique": self.max_unique_values,
41 "metric_name_defining_limit": self.metric_name_defining_limit,
42 }
43 )
44
45
46 class CardinalityLimitMode(enum.Enum):
47 """Preset limits based on unique values (cardinality)
48
49 Defines relative (ratio) and absolute number of records (table rows) that
50 correspond to each cardinality category.
51
52 Used to determine appropriate Expectation configurations based on data.
53 """
54
55 ZERO = AbsoluteCardinalityLimit("ZERO", 0)
56 ONE = AbsoluteCardinalityLimit("ONE", 1)
57 TWO = AbsoluteCardinalityLimit("TWO", 2)
58 VERY_FEW = AbsoluteCardinalityLimit("VERY_FEW", 10)
59 FEW = AbsoluteCardinalityLimit("FEW", 100)
60 SOME = AbsoluteCardinalityLimit("SOME", 1000)
61 MANY = AbsoluteCardinalityLimit("MANY", 10000)
62 VERY_MANY = AbsoluteCardinalityLimit("VERY_MANY", 100000)
63 UNIQUE = RelativeCardinalityLimit("UNIQUE", 1.0)
64 ABS_10 = AbsoluteCardinalityLimit("ABS_10", 10)
65 ABS_100 = AbsoluteCardinalityLimit("ABS_100", 100)
66 ABS_1000 = AbsoluteCardinalityLimit("ABS_1000", 1000)
67 ABS_10_000 = AbsoluteCardinalityLimit("ABS_10_000", int(1e4))
68 ABS_100_000 = AbsoluteCardinalityLimit("ABS_100_000", int(1e5))
69 ABS_1_000_000 = AbsoluteCardinalityLimit("ABS_1_000_000", int(1e6))
70 ABS_10_000_000 = AbsoluteCardinalityLimit("ABS_10_000_000", int(1e7))
71 ABS_100_000_000 = AbsoluteCardinalityLimit("ABS_100_000_000", int(1e8))
72 ABS_1_000_000_000 = AbsoluteCardinalityLimit("ABS_1_000_000_000", int(1e9))
73 REL_0 = RelativeCardinalityLimit("REL_0", 0.0)
74 REL_001 = RelativeCardinalityLimit("REL_001", 1e-5)
75 REL_01 = RelativeCardinalityLimit("REL_01", 1e-4)
76 REL_0_1 = RelativeCardinalityLimit("REL_0_1", 1e-3)
77 REL_1 = RelativeCardinalityLimit("REL_1", 1e-2)
78 REL_10 = RelativeCardinalityLimit("REL_10", 0.10)
79 REL_25 = RelativeCardinalityLimit("REL_25", 0.25)
80 REL_50 = RelativeCardinalityLimit("REL_50", 0.50)
81 REL_75 = RelativeCardinalityLimit("REL_75", 0.75)
82 REL_100 = RelativeCardinalityLimit("REL_100", 1.0)
83 ONE_PCT = RelativeCardinalityLimit("ONE_PCT", 0.01)
84 TEN_PCT = RelativeCardinalityLimit("TEN_PCT", 0.10)
85
86
87 class CardinalityChecker:
88 """Handles cardinality checking given cardinality limit mode and measured value.
89
90 This class also validates cardinality limit settings and converts from
91 various types of settings. You can choose one of the attributes listed
92 below to create an instance.
93
94 Attributes:
95 cardinality_limit_mode: CardinalityLimitMode or string name of the mode
96 defining the maximum allowable cardinality.
97 max_unique_values: number of max unique rows for a custom
98 cardinality limit.
99 max_proportion_unique: proportion of unique values for a
100 custom cardinality limit.
101 """
102
103 SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES: Tuple[
104 Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]
105 ] = (
106 AbsoluteCardinalityLimit,
107 RelativeCardinalityLimit,
108 )
109 SUPPORTED_LIMIT_MODE_CLASS_NAMES: Tuple[str] = (
110 mode.__name__ for mode in SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES
111 )
112 SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS: Tuple[str] = (
113 mode.name for mode in CardinalityLimitMode
114 )
115
116 def __init__(
117 self,
118 cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,
119 max_unique_values: Optional[int] = None,
120 max_proportion_unique: Optional[float] = None,
121 ) -> None:
122 self._cardinality_limit_mode = self._convert_to_cardinality_limit_mode(
123 cardinality_limit_mode=cardinality_limit_mode,
124 max_unique_values=max_unique_values,
125 max_proportion_unique=max_proportion_unique,
126 )
127
128 @property
129 def cardinality_limit_mode(
130 self,
131 ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:
132 return self._cardinality_limit_mode
133
134 def cardinality_within_limit(self, metric_value: float) -> bool:
135 """Determine if the cardinality is within configured limit.
136
137 The metric_value supplied should be either a proportion of unique values
138 or number of unique values based on the configured cardinality limit.
139
140 Args:
141 metric_value: int if number of unique values, float if proportion
142 of unique values.
143
144 Returns:
145 Boolean of whether the cardinality is within the configured limit
146 """
147 self._validate_metric_value(metric_value=metric_value)
148 if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit):
149 return metric_value <= self._cardinality_limit_mode.max_unique_values
150 elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):
151 return metric_value <= self._cardinality_limit_mode.max_proportion_unique
152
153 @staticmethod
154 def _validate_metric_value(metric_value: float) -> None:
155 if not isinstance(metric_value, (int, float)):
156 raise ProfilerConfigurationError(
157 f"Value of measured cardinality must be of type int or float, you provided {type(metric_value)}"
158 )
159
160 if metric_value < 0.00:
161 raise ProfilerConfigurationError(
162 f"Value of cardinality (number of rows or percent unique) should be greater than 0.00, your value is {metric_value}"
163 )
164
165 @staticmethod
166 def _convert_to_cardinality_limit_mode(
167 cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,
168 max_unique_values: Optional[int] = None,
169 max_proportion_unique: Optional[float] = None,
170 ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:
171 validate_input_parameters(
172 cardinality_limit_mode=cardinality_limit_mode,
173 max_unique_values=max_unique_values,
174 max_proportion_unique=max_proportion_unique,
175 )
176
177 if cardinality_limit_mode is not None:
178 if isinstance(cardinality_limit_mode, str):
179 try:
180 return CardinalityLimitMode[cardinality_limit_mode.upper()].value
181 except KeyError:
182 raise ProfilerConfigurationError(
183 f"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}"
184 )
185 elif isinstance(cardinality_limit_mode, dict):
186 validate_input_parameters(
187 cardinality_limit_mode=cardinality_limit_mode.get("name"),
188 max_unique_values=cardinality_limit_mode.get("max_unique_values"),
189 max_proportion_unique=cardinality_limit_mode.get(
190 "max_proportion_unique"
191 ),
192 required_num_supplied_params=2,
193 )
194 try:
195 return AbsoluteCardinalityLimit(
196 name=cardinality_limit_mode["name"],
197 max_unique_values=cardinality_limit_mode["max_unique_values"],
198 metric_name_defining_limit=cardinality_limit_mode[
199 "metric_name_defining_limit"
200 ],
201 )
202 except (KeyError, ValueError):
203 try:
204 return RelativeCardinalityLimit(
205 name=cardinality_limit_mode["name"],
206 max_proportion_unique=cardinality_limit_mode[
207 "max_proportion_unique"
208 ],
209 metric_name_defining_limit=cardinality_limit_mode[
210 "metric_name_defining_limit"
211 ],
212 )
213 except (KeyError, ValueError):
214 raise ProfilerConfigurationError(
215 f"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}"
216 )
217 else:
218 return cardinality_limit_mode.value
219
220 if max_unique_values is not None:
221 return AbsoluteCardinalityLimit(
222 name=f"CUSTOM_ABS_{max_unique_values}",
223 max_unique_values=max_unique_values,
224 )
225
226 if max_proportion_unique is not None:
227 return RelativeCardinalityLimit(
228 name=f"CUSTOM_REL_{max_proportion_unique}",
229 max_proportion_unique=max_proportion_unique,
230 )
231
232
233 def validate_input_parameters(
234 cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,
235 max_unique_values: Optional[int] = None,
236 max_proportion_unique: Optional[int] = None,
237 required_num_supplied_params: int = 1,
238 ) -> None:
239 num_supplied_params: int = sum(
240 [
241 0 if param is None else 1
242 for param in (
243 cardinality_limit_mode,
244 max_unique_values,
245 max_proportion_unique,
246 )
247 ]
248 )
249 if num_supplied_params != required_num_supplied_params:
250 raise ProfilerConfigurationError(
251 f"Please pass ONE of the following parameters: cardinality_limit_mode, max_unique_values, max_proportion_unique, you passed {num_supplied_params} parameters."
252 )
253
254 if cardinality_limit_mode is not None:
255 if not isinstance(cardinality_limit_mode, (str, CardinalityLimitMode, dict)):
256 raise ProfilerConfigurationError(
257 f"Please specify a supported cardinality limit type, supported classes are {','.join(CardinalityChecker.SUPPORTED_LIMIT_MODE_CLASS_NAMES)} and supported strings are {','.join(CardinalityChecker.SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS)}"
258 )
259
260 if required_num_supplied_params == 2:
261 try:
262 return CardinalityLimitMode[cardinality_limit_mode.upper()].value
263 except KeyError:
264 raise ProfilerConfigurationError(
265 f"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}"
266 )
267
268 if max_unique_values is not None:
269 if not isinstance(max_unique_values, int):
270 raise ProfilerConfigurationError(
271 f"Please specify an int, you specified a {type(max_unique_values)}"
272 )
273
274 if max_proportion_unique is not None:
275 if not isinstance(max_proportion_unique, (float, int)):
276 raise ProfilerConfigurationError(
277 f"Please specify a float or int, you specified a {type(max_proportion_unique)}"
278 )
279
[end of great_expectations/rule_based_profiler/helpers/cardinality_checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py
--- a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py
+++ b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py
@@ -1,7 +1,7 @@
import abc
import enum
from dataclasses import dataclass
-from typing import Optional, Tuple, Union
+from typing import Optional, Tuple, Union, cast
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.exceptions import ProfilerConfigurationError
@@ -147,9 +147,14 @@
self._validate_metric_value(metric_value=metric_value)
if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit):
return metric_value <= self._cardinality_limit_mode.max_unique_values
- elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):
+
+ if isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):
return metric_value <= self._cardinality_limit_mode.max_proportion_unique
+ raise ValueError(
+ f'Unknown "cardinality_limit_mode" mode "{self._cardinality_limit_mode}" encountered.'
+ )
+
@staticmethod
def _validate_metric_value(metric_value: float) -> None:
if not isinstance(metric_value, (int, float)):
@@ -215,7 +220,7 @@
f"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}"
)
else:
- return cardinality_limit_mode.value
+ return cast(CardinalityLimitMode, cardinality_limit_mode).value
if max_unique_values is not None:
return AbsoluteCardinalityLimit(
|
{"golden_diff": "diff --git a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py\n--- a/great_expectations/rule_based_profiler/helpers/cardinality_checker.py\n+++ b/great_expectations/rule_based_profiler/helpers/cardinality_checker.py\n@@ -1,7 +1,7 @@\n import abc\n import enum\n from dataclasses import dataclass\n-from typing import Optional, Tuple, Union\n+from typing import Optional, Tuple, Union, cast\n \n from great_expectations.core.util import convert_to_json_serializable\n from great_expectations.exceptions import ProfilerConfigurationError\n@@ -147,9 +147,14 @@\n self._validate_metric_value(metric_value=metric_value)\n if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_unique_values\n- elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):\n+\n+ if isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_proportion_unique\n \n+ raise ValueError(\n+ f'Unknown \"cardinality_limit_mode\" mode \"{self._cardinality_limit_mode}\" encountered.'\n+ )\n+\n @staticmethod\n def _validate_metric_value(metric_value: float) -> None:\n if not isinstance(metric_value, (int, float)):\n@@ -215,7 +220,7 @@\n f\"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n else:\n- return cardinality_limit_mode.value\n+ return cast(CardinalityLimitMode, cardinality_limit_mode).value\n \n if max_unique_values is not None:\n return AbsoluteCardinalityLimit(\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import abc\nimport enum\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\nfrom great_expectations.core.util import convert_to_json_serializable\nfrom great_expectations.exceptions import ProfilerConfigurationError\nfrom great_expectations.types import SerializableDictDot\n\n\n@dataclass(frozen=True)\nclass CardinalityLimit(abc.ABC, SerializableDictDot):\n name: str\n\n\n@dataclass(frozen=True)\nclass RelativeCardinalityLimit(CardinalityLimit):\n max_proportion_unique: float\n metric_name_defining_limit: str = \"column.unique_proportion\"\n\n def to_json_dict(self) -> dict:\n return convert_to_json_serializable(\n {\n \"name\": self.name,\n \"max_proportion_unique\": self.max_proportion_unique,\n \"metric_name_defining_limit\": self.metric_name_defining_limit,\n }\n )\n\n\n@dataclass(frozen=True)\nclass AbsoluteCardinalityLimit(CardinalityLimit):\n max_unique_values: int\n metric_name_defining_limit: str = \"column.distinct_values.count\"\n\n def to_json_dict(self) -> dict:\n return convert_to_json_serializable(\n {\n \"name\": self.name,\n \"max_proportion_unique\": self.max_unique_values,\n \"metric_name_defining_limit\": self.metric_name_defining_limit,\n }\n )\n\n\nclass CardinalityLimitMode(enum.Enum):\n \"\"\"Preset limits based on unique values (cardinality)\n\n Defines relative (ratio) and absolute number of records (table rows) that\n correspond to each cardinality category.\n\n Used to determine appropriate Expectation configurations based on data.\n \"\"\"\n\n ZERO = AbsoluteCardinalityLimit(\"ZERO\", 0)\n ONE = AbsoluteCardinalityLimit(\"ONE\", 1)\n TWO = AbsoluteCardinalityLimit(\"TWO\", 2)\n VERY_FEW = 
AbsoluteCardinalityLimit(\"VERY_FEW\", 10)\n FEW = AbsoluteCardinalityLimit(\"FEW\", 100)\n SOME = AbsoluteCardinalityLimit(\"SOME\", 1000)\n MANY = AbsoluteCardinalityLimit(\"MANY\", 10000)\n VERY_MANY = AbsoluteCardinalityLimit(\"VERY_MANY\", 100000)\n UNIQUE = RelativeCardinalityLimit(\"UNIQUE\", 1.0)\n ABS_10 = AbsoluteCardinalityLimit(\"ABS_10\", 10)\n ABS_100 = AbsoluteCardinalityLimit(\"ABS_100\", 100)\n ABS_1000 = AbsoluteCardinalityLimit(\"ABS_1000\", 1000)\n ABS_10_000 = AbsoluteCardinalityLimit(\"ABS_10_000\", int(1e4))\n ABS_100_000 = AbsoluteCardinalityLimit(\"ABS_100_000\", int(1e5))\n ABS_1_000_000 = AbsoluteCardinalityLimit(\"ABS_1_000_000\", int(1e6))\n ABS_10_000_000 = AbsoluteCardinalityLimit(\"ABS_10_000_000\", int(1e7))\n ABS_100_000_000 = AbsoluteCardinalityLimit(\"ABS_100_000_000\", int(1e8))\n ABS_1_000_000_000 = AbsoluteCardinalityLimit(\"ABS_1_000_000_000\", int(1e9))\n REL_0 = RelativeCardinalityLimit(\"REL_0\", 0.0)\n REL_001 = RelativeCardinalityLimit(\"REL_001\", 1e-5)\n REL_01 = RelativeCardinalityLimit(\"REL_01\", 1e-4)\n REL_0_1 = RelativeCardinalityLimit(\"REL_0_1\", 1e-3)\n REL_1 = RelativeCardinalityLimit(\"REL_1\", 1e-2)\n REL_10 = RelativeCardinalityLimit(\"REL_10\", 0.10)\n REL_25 = RelativeCardinalityLimit(\"REL_25\", 0.25)\n REL_50 = RelativeCardinalityLimit(\"REL_50\", 0.50)\n REL_75 = RelativeCardinalityLimit(\"REL_75\", 0.75)\n REL_100 = RelativeCardinalityLimit(\"REL_100\", 1.0)\n ONE_PCT = RelativeCardinalityLimit(\"ONE_PCT\", 0.01)\n TEN_PCT = RelativeCardinalityLimit(\"TEN_PCT\", 0.10)\n\n\nclass CardinalityChecker:\n \"\"\"Handles cardinality checking given cardinality limit mode and measured value.\n\n This class also validates cardinality limit settings and converts from\n various types of settings. 
You can choose one of the attributes listed\n below to create an instance.\n\n Attributes:\n cardinality_limit_mode: CardinalityLimitMode or string name of the mode\n defining the maximum allowable cardinality.\n max_unique_values: number of max unique rows for a custom\n cardinality limit.\n max_proportion_unique: proportion of unique values for a\n custom cardinality limit.\n \"\"\"\n\n SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES: Tuple[\n Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]\n ] = (\n AbsoluteCardinalityLimit,\n RelativeCardinalityLimit,\n )\n SUPPORTED_LIMIT_MODE_CLASS_NAMES: Tuple[str] = (\n mode.__name__ for mode in SUPPORTED_CARDINALITY_LIMIT_MODE_CLASSES\n )\n SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS: Tuple[str] = (\n mode.name for mode in CardinalityLimitMode\n )\n\n def __init__(\n self,\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[float] = None,\n ) -> None:\n self._cardinality_limit_mode = self._convert_to_cardinality_limit_mode(\n cardinality_limit_mode=cardinality_limit_mode,\n max_unique_values=max_unique_values,\n max_proportion_unique=max_proportion_unique,\n )\n\n @property\n def cardinality_limit_mode(\n self,\n ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:\n return self._cardinality_limit_mode\n\n def cardinality_within_limit(self, metric_value: float) -> bool:\n \"\"\"Determine if the cardinality is within configured limit.\n\n The metric_value supplied should be either a proportion of unique values\n or number of unique values based on the configured cardinality limit.\n\n Args:\n metric_value: int if number of unique values, float if proportion\n of unique values.\n\n Returns:\n Boolean of whether the cardinality is within the configured limit\n \"\"\"\n self._validate_metric_value(metric_value=metric_value)\n if isinstance(self._cardinality_limit_mode, AbsoluteCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_unique_values\n elif isinstance(self._cardinality_limit_mode, RelativeCardinalityLimit):\n return metric_value <= self._cardinality_limit_mode.max_proportion_unique\n\n @staticmethod\n def _validate_metric_value(metric_value: float) -> None:\n if not isinstance(metric_value, (int, float)):\n raise ProfilerConfigurationError(\n f\"Value of measured cardinality must be of type int or float, you provided {type(metric_value)}\"\n )\n\n if metric_value < 0.00:\n raise ProfilerConfigurationError(\n f\"Value of cardinality (number of rows or percent unique) should be greater than 0.00, your value is {metric_value}\"\n )\n\n @staticmethod\n def _convert_to_cardinality_limit_mode(\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[float] = None,\n ) -> Union[AbsoluteCardinalityLimit, RelativeCardinalityLimit]:\n validate_input_parameters(\n cardinality_limit_mode=cardinality_limit_mode,\n max_unique_values=max_unique_values,\n max_proportion_unique=max_proportion_unique,\n )\n\n if cardinality_limit_mode is not None:\n if isinstance(cardinality_limit_mode, str):\n try:\n return CardinalityLimitMode[cardinality_limit_mode.upper()].value\n except KeyError:\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. 
Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n elif isinstance(cardinality_limit_mode, dict):\n validate_input_parameters(\n cardinality_limit_mode=cardinality_limit_mode.get(\"name\"),\n max_unique_values=cardinality_limit_mode.get(\"max_unique_values\"),\n max_proportion_unique=cardinality_limit_mode.get(\n \"max_proportion_unique\"\n ),\n required_num_supplied_params=2,\n )\n try:\n return AbsoluteCardinalityLimit(\n name=cardinality_limit_mode[\"name\"],\n max_unique_values=cardinality_limit_mode[\"max_unique_values\"],\n metric_name_defining_limit=cardinality_limit_mode[\n \"metric_name_defining_limit\"\n ],\n )\n except (KeyError, ValueError):\n try:\n return RelativeCardinalityLimit(\n name=cardinality_limit_mode[\"name\"],\n max_proportion_unique=cardinality_limit_mode[\n \"max_proportion_unique\"\n ],\n metric_name_defining_limit=cardinality_limit_mode[\n \"metric_name_defining_limit\"\n ],\n )\n except (KeyError, ValueError):\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n else:\n return cardinality_limit_mode.value\n\n if max_unique_values is not None:\n return AbsoluteCardinalityLimit(\n name=f\"CUSTOM_ABS_{max_unique_values}\",\n max_unique_values=max_unique_values,\n )\n\n if max_proportion_unique is not None:\n return RelativeCardinalityLimit(\n name=f\"CUSTOM_REL_{max_proportion_unique}\",\n max_proportion_unique=max_proportion_unique,\n )\n\n\ndef validate_input_parameters(\n cardinality_limit_mode: Optional[Union[str, CardinalityLimitMode, dict]] = None,\n max_unique_values: Optional[int] = None,\n max_proportion_unique: Optional[int] = None,\n required_num_supplied_params: int = 1,\n) -> None:\n num_supplied_params: int = sum(\n [\n 0 if param is None else 1\n for param in (\n cardinality_limit_mode,\n max_unique_values,\n max_proportion_unique,\n )\n ]\n )\n if num_supplied_params != required_num_supplied_params:\n raise ProfilerConfigurationError(\n f\"Please pass ONE of the following parameters: cardinality_limit_mode, max_unique_values, max_proportion_unique, you passed {num_supplied_params} parameters.\"\n )\n\n if cardinality_limit_mode is not None:\n if not isinstance(cardinality_limit_mode, (str, CardinalityLimitMode, dict)):\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality limit type, supported classes are {','.join(CardinalityChecker.SUPPORTED_LIMIT_MODE_CLASS_NAMES)} and supported strings are {','.join(CardinalityChecker.SUPPORTED_CARDINALITY_LIMIT_MODE_STRINGS)}\"\n )\n\n if required_num_supplied_params == 2:\n try:\n return CardinalityLimitMode[cardinality_limit_mode.upper()].value\n except KeyError:\n raise ProfilerConfigurationError(\n f\"Please specify a supported cardinality mode. Supported cardinality modes are {[member.name for member in CardinalityLimitMode]}\"\n )\n\n if max_unique_values is not None:\n if not isinstance(max_unique_values, int):\n raise ProfilerConfigurationError(\n f\"Please specify an int, you specified a {type(max_unique_values)}\"\n )\n\n if max_proportion_unique is not None:\n if not isinstance(max_proportion_unique, (float, int)):\n raise ProfilerConfigurationError(\n f\"Please specify a float or int, you specified a {type(max_proportion_unique)}\"\n )\n", "path": "great_expectations/rule_based_profiler/helpers/cardinality_checker.py"}]}
| 3,988 | 391 |
gh_patches_debug_32588 | rasdani/github-patches | git_diff | sopel-irc__sopel-1503 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reddit: shortlinks are not handled
Reddit supports (and promotes in the comments page sidebar) a short link format for threads, `http(s)://redd.it/{id}`, where the `id` comes from here in a full-size link: `https://www.reddit.com/r/subreddit/comments/{id}/slugified_thread_title/`.
It should be trivial to add detection and handling of these links in the `reddit` module.
Not assigning to a milestone immediately, because I'd like to release this in a hypothetical version 6.7.0 *if* one happens before 7.0 (but none is planned). It does seem slightly more significant a change than would belong in one of the planned 6.6.x releases, unfortunately.
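One possible shape for the detection, sketched below for illustration only — the actual pattern and wiring would live alongside the module's existing full-link handling:

```python
import re

# Hypothetical pattern; the id group mirrors the one captured from
# full-size comment links, so the existing thread handler could be reused.
short_post_url = r'https?://redd\.it/([\w-]+)'

m = re.search(short_post_url, 'https://redd.it/a1b2c3')
print(m.group(1))  # 'a1b2c3'
```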
</issue>
<code>
[start of sopel/modules/reddit.py]
1 # coding=utf-8
2 # Author: Elsie Powell, embolalia.com
3 from __future__ import unicode_literals, absolute_import, print_function, division
4
5 from sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP
6 from sopel.formatting import bold, color, colors
7 from sopel.web import USER_AGENT
8 from sopel.tools import time
9 import datetime as dt
10 import praw
11 import re
12 import sys
13 if sys.version_info.major >= 3:
14 unicode = str
15 if sys.version_info.minor >= 4:
16 from html import unescape
17 else:
18 from html.parser import HTMLParser
19 unescape = HTMLParser().unescape
20 else:
21 from HTMLParser import HTMLParser
22 unescape = HTMLParser().unescape
23
24
25 domain = r'https?://(?:www\.|old\.|pay\.|ssl\.|[a-z]{2}\.)?reddit\.com'
26 post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain
27 user_url = r'%s/u(ser)?/([\w-]+)' % domain
28 post_regex = re.compile(post_url)
29 user_regex = re.compile(user_url)
30 spoiler_subs = [
31 'stevenuniverse',
32 'onepunchman',
33 ]
34
35
36 def setup(bot):
37 bot.register_url_callback(post_regex, rpost_info)
38 bot.register_url_callback(user_regex, redditor_info)
39
40
41 def shutdown(bot):
42 bot.unregister_url_callback(post_regex)
43 bot.unregister_url_callback(user_regex)
44
45
46 @rule('.*%s.*' % post_url)
47 def rpost_info(bot, trigger, match=None):
48 match = match or trigger
49 try:
50 r = praw.Reddit(
51 user_agent=USER_AGENT,
52 client_id='6EiphT6SSQq7FQ',
53 client_secret=None,
54 )
55 s = r.submission(id=match.group(2))
56 except Exception:
57 r = praw.Reddit(user_agent=USER_AGENT)
58 s = r.get_submission(submission_id=match.group(2))
59
60 message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '
61 '{comments} comments | Posted by {author} | '
62 'Created at {created}')
63
64 subreddit = s.subreddit.display_name
65 if s.is_self:
66 link = '(self.{})'.format(subreddit)
67 else:
68 link = '({}) to r/{}'.format(s.url, subreddit)
69
70 if s.over_18:
71 if subreddit.lower() in spoiler_subs:
72 nsfw = bold(color(' [SPOILERS]', colors.RED))
73 else:
74 nsfw = bold(color(' [NSFW]', colors.RED))
75
76 sfw = bot.db.get_channel_value(trigger.sender, 'sfw')
77 if sfw:
78 link = '(link hidden)'
79 bot.write(['KICK', trigger.sender, trigger.nick,
80 'Linking to NSFW content in a SFW channel.'])
81 else:
82 nsfw = ''
83
84 if s.author:
85 author = s.author.name
86 else:
87 author = '[deleted]'
88
89 tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,
90 trigger.sender)
91 time_created = dt.datetime.utcfromtimestamp(s.created_utc)
92 created = time.format_time(bot.db, bot.config, tz, trigger.nick,
93 trigger.sender, time_created)
94
95 if s.score > 0:
96 point_color = colors.GREEN
97 else:
98 point_color = colors.RED
99
100 percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)
101
102 title = unescape(s.title)
103 message = message.format(
104 title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,
105 comments=s.num_comments, author=author, created=created)
106
107 bot.say(message)
108
109
110 # If you change this, you'll have to change some other things...
111 @commands('redditor')
112 @example('.redditor poem_for_your_sprog')
113 def redditor_info(bot, trigger, match=None):
114 """Show information about the given Redditor"""
115 commanded = re.match(bot.config.core.prefix + 'redditor', trigger)
116 r = praw.Reddit(
117 user_agent=USER_AGENT,
118 client_id='6EiphT6SSQq7FQ',
119 client_secret=None,
120 )
121 match = match or trigger
122 try:
123 u = r.get_redditor(match.group(2))
124 except Exception: # TODO: Be specific
125 if commanded:
126 bot.say('No such Redditor.')
127 return NOLIMIT
128 else:
129 return
130 # Fail silently if it wasn't an explicit command.
131
132 message = '[REDDITOR] ' + u.name
133 now = dt.datetime.utcnow()
134 cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)
135 cakeday_start = cakeday_start.replace(year=now.year)
136 day = dt.timedelta(days=1)
137 year_div_by_400 = now.year % 400 == 0
138 year_div_by_100 = now.year % 100 == 0
139 year_div_by_4 = now.year % 4 == 0
140 is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)
141 if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):
142 # If cake day is 2/29 and it's not a leap year, cake day is 1/3.
143 # Cake day begins at exact account creation time.
144 is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)
145 else:
146 is_cakeday = cakeday_start <= now <= cakeday_start + day
147
148 if is_cakeday:
149 message = message + ' | 13Cake day'
150 if commanded:
151 message = message + ' | https://reddit.com/u/' + u.name
152 if u.is_gold:
153 message = message + ' | 08Gold'
154 if u.is_mod:
155 message = message + ' | 05Mod'
156 message = message + (' | Link: ' + str(u.link_karma) +
157 ' | Comment: ' + str(u.comment_karma))
158
159 bot.say(message)
160
161
162 # If you change the groups here, you'll have to change some things above.
163 @rule('.*%s.*' % user_url)
164 def auto_redditor_info(bot, trigger):
165 redditor_info(bot, trigger)
166
167
168 @require_chanmsg('.setsfw is only permitted in channels')
169 @commands('setsafeforwork', 'setsfw')
170 @example('.setsfw true')
171 @example('.setsfw false')
172 def update_channel(bot, trigger):
173 """
174 Sets the Safe for Work status (true or false) for the current
175 channel. Defaults to false.
176 """
177 if bot.channels[trigger.sender].privileges[trigger.nick] < OP:
178 return
179 else:
180 param = 'true'
181 if trigger.group(2) and trigger.group(3):
182 param = trigger.group(3).strip().lower()
183 sfw = param == 'true'
184 bot.db.set_channel_value(trigger.sender, 'sfw', sfw)
185 if sfw:
186 bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)
187 else:
188 bot.reply('Got it. %s is now flagged as NSFW.' % trigger.sender)
189
190
191 @commands('getsafeforwork', 'getsfw')
192 @example('.getsfw [channel]')
193 def get_channel_sfw(bot, trigger):
194 """
195 Gets the preferred channel's Safe for Work status, or the current
196 channel's status if no channel given.
197 """
198 channel = trigger.group(2)
199 if not channel:
200 channel = trigger.sender
201 if channel.is_nick():
202 return bot.say('.getsfw with no channel param is only permitted in channels')
203
204 channel = channel.strip()
205
206 sfw = bot.db.get_channel_value(channel, 'sfw')
207 if sfw:
208 bot.say('%s is flagged as SFW' % channel)
209 else:
210 bot.say('%s is flagged as NSFW' % channel)
211
[end of sopel/modules/reddit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py
--- a/sopel/modules/reddit.py
+++ b/sopel/modules/reddit.py
@@ -23,9 +23,11 @@
domain = r'https?://(?:www\.|old\.|pay\.|ssl\.|[a-z]{2}\.)?reddit\.com'
-post_url = r'%s/r/(.*?)/comments/([\w-]+)' % domain
+post_url = r'%s/r/.*?/comments/([\w-]+)' % domain
+short_post_url = r'https?://redd.it/([\w-]+)'
user_url = r'%s/u(ser)?/([\w-]+)' % domain
post_regex = re.compile(post_url)
+short_post_regex = re.compile(short_post_url)
user_regex = re.compile(user_url)
spoiler_subs = [
'stevenuniverse',
@@ -35,15 +37,18 @@
def setup(bot):
bot.register_url_callback(post_regex, rpost_info)
+ bot.register_url_callback(short_post_regex, rpost_info)
bot.register_url_callback(user_regex, redditor_info)
def shutdown(bot):
bot.unregister_url_callback(post_regex)
+ bot.unregister_url_callback(short_post_regex)
bot.unregister_url_callback(user_regex)
@rule('.*%s.*' % post_url)
+@rule('.*%s.*' % short_post_url)
def rpost_info(bot, trigger, match=None):
match = match or trigger
try:
@@ -52,10 +57,10 @@
client_id='6EiphT6SSQq7FQ',
client_secret=None,
)
- s = r.submission(id=match.group(2))
+ s = r.submission(id=match.group(1))
except Exception:
r = praw.Reddit(user_agent=USER_AGENT)
- s = r.get_submission(submission_id=match.group(2))
+ s = r.get_submission(submission_id=match.group(1))
message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '
'{comments} comments | Posted by {author} | '
|
{"golden_diff": "diff --git a/sopel/modules/reddit.py b/sopel/modules/reddit.py\n--- a/sopel/modules/reddit.py\n+++ b/sopel/modules/reddit.py\n@@ -23,9 +23,11 @@\n \n \n domain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\n-post_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\n+post_url = r'%s/r/.*?/comments/([\\w-]+)' % domain\n+short_post_url = r'https?://redd.it/([\\w-]+)'\n user_url = r'%s/u(ser)?/([\\w-]+)' % domain\n post_regex = re.compile(post_url)\n+short_post_regex = re.compile(short_post_url)\n user_regex = re.compile(user_url)\n spoiler_subs = [\n 'stevenuniverse',\n@@ -35,15 +37,18 @@\n \n def setup(bot):\n bot.register_url_callback(post_regex, rpost_info)\n+ bot.register_url_callback(short_post_regex, rpost_info)\n bot.register_url_callback(user_regex, redditor_info)\n \n \n def shutdown(bot):\n bot.unregister_url_callback(post_regex)\n+ bot.unregister_url_callback(short_post_regex)\n bot.unregister_url_callback(user_regex)\n \n \n @rule('.*%s.*' % post_url)\n+@rule('.*%s.*' % short_post_url)\n def rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n@@ -52,10 +57,10 @@\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n- s = r.submission(id=match.group(2))\n+ s = r.submission(id=match.group(1))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n- s = r.get_submission(submission_id=match.group(2))\n+ s = r.get_submission(submission_id=match.group(1))\n \n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n", "issue": "reddit: shortlinks are not handled\nReddit supports (and promotes in the comments page sidebar) a short link format for threads, `http(s)://redd.it/{id}`, where the `id` comes from here in a full-size link: `https://www.reddit.com/r/subreddit/comments/{id}/slugified_thread_title/`.\n\nIt should be trivial to add detection and handling of these links in the `reddit` module.\n\nNot assigning to a milestone immediately, because I'd like to release this in a hypothetical version 6.7.0 *if* one happens before 7.0 (but none is planned). 
It does seem slightly more significant a change than would belong in one of the planned 6.6.x releases, unfortunately.\n", "before_files": [{"content": "# coding=utf-8\n# Author: Elsie Powell, embolalia.com\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.module import commands, rule, example, require_chanmsg, NOLIMIT, OP\nfrom sopel.formatting import bold, color, colors\nfrom sopel.web import USER_AGENT\nfrom sopel.tools import time\nimport datetime as dt\nimport praw\nimport re\nimport sys\nif sys.version_info.major >= 3:\n unicode = str\n if sys.version_info.minor >= 4:\n from html import unescape\n else:\n from html.parser import HTMLParser\n unescape = HTMLParser().unescape\nelse:\n from HTMLParser import HTMLParser\n unescape = HTMLParser().unescape\n\n\ndomain = r'https?://(?:www\\.|old\\.|pay\\.|ssl\\.|[a-z]{2}\\.)?reddit\\.com'\npost_url = r'%s/r/(.*?)/comments/([\\w-]+)' % domain\nuser_url = r'%s/u(ser)?/([\\w-]+)' % domain\npost_regex = re.compile(post_url)\nuser_regex = re.compile(user_url)\nspoiler_subs = [\n 'stevenuniverse',\n 'onepunchman',\n]\n\n\ndef setup(bot):\n bot.register_url_callback(post_regex, rpost_info)\n bot.register_url_callback(user_regex, redditor_info)\n\n\ndef shutdown(bot):\n bot.unregister_url_callback(post_regex)\n bot.unregister_url_callback(user_regex)\n\n\n@rule('.*%s.*' % post_url)\ndef rpost_info(bot, trigger, match=None):\n match = match or trigger\n try:\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n s = r.submission(id=match.group(2))\n except Exception:\n r = praw.Reddit(user_agent=USER_AGENT)\n s = r.get_submission(submission_id=match.group(2))\n\n message = ('[REDDIT] {title} {link}{nsfw} | {points} points ({percent}) | '\n '{comments} comments | Posted by {author} | '\n 'Created at {created}')\n\n subreddit = s.subreddit.display_name\n if s.is_self:\n link = '(self.{})'.format(subreddit)\n else:\n link = '({}) to r/{}'.format(s.url, subreddit)\n\n if s.over_18:\n if subreddit.lower() in spoiler_subs:\n nsfw = bold(color(' [SPOILERS]', colors.RED))\n else:\n nsfw = bold(color(' [NSFW]', colors.RED))\n\n sfw = bot.db.get_channel_value(trigger.sender, 'sfw')\n if sfw:\n link = '(link hidden)'\n bot.write(['KICK', trigger.sender, trigger.nick,\n 'Linking to NSFW content in a SFW channel.'])\n else:\n nsfw = ''\n\n if s.author:\n author = s.author.name\n else:\n author = '[deleted]'\n\n tz = time.get_timezone(bot.db, bot.config, None, trigger.nick,\n trigger.sender)\n time_created = dt.datetime.utcfromtimestamp(s.created_utc)\n created = time.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, time_created)\n\n if s.score > 0:\n point_color = colors.GREEN\n else:\n point_color = colors.RED\n\n percent = color(unicode(s.upvote_ratio * 100) + '%', point_color)\n\n title = unescape(s.title)\n message = message.format(\n title=title, link=link, nsfw=nsfw, points=s.score, percent=percent,\n comments=s.num_comments, author=author, created=created)\n\n bot.say(message)\n\n\n# If you change this, you'll have to change some other things...\n@commands('redditor')\n@example('.redditor poem_for_your_sprog')\ndef redditor_info(bot, trigger, match=None):\n \"\"\"Show information about the given Redditor\"\"\"\n commanded = re.match(bot.config.core.prefix + 'redditor', trigger)\n r = praw.Reddit(\n user_agent=USER_AGENT,\n client_id='6EiphT6SSQq7FQ',\n client_secret=None,\n )\n match = match or trigger\n try:\n u = 
r.get_redditor(match.group(2))\n except Exception: # TODO: Be specific\n if commanded:\n bot.say('No such Redditor.')\n return NOLIMIT\n else:\n return\n # Fail silently if it wasn't an explicit command.\n\n message = '[REDDITOR] ' + u.name\n now = dt.datetime.utcnow()\n cakeday_start = dt.datetime.utcfromtimestamp(u.created_utc)\n cakeday_start = cakeday_start.replace(year=now.year)\n day = dt.timedelta(days=1)\n year_div_by_400 = now.year % 400 == 0\n year_div_by_100 = now.year % 100 == 0\n year_div_by_4 = now.year % 4 == 0\n is_leap = year_div_by_400 or ((not year_div_by_100) and year_div_by_4)\n if (not is_leap) and ((cakeday_start.month, cakeday_start.day) == (2, 29)):\n # If cake day is 2/29 and it's not a leap year, cake day is 1/3.\n # Cake day begins at exact account creation time.\n is_cakeday = cakeday_start + day <= now <= cakeday_start + (2 * day)\n else:\n is_cakeday = cakeday_start <= now <= cakeday_start + day\n\n if is_cakeday:\n message = message + ' | \u0002\u000313Cake day\u0003\u0002'\n if commanded:\n message = message + ' | https://reddit.com/u/' + u.name\n if u.is_gold:\n message = message + ' | \u0002\u000308Gold\u0003\u0002'\n if u.is_mod:\n message = message + ' | \u0002\u000305Mod\u0003\u0002'\n message = message + (' | Link: ' + str(u.link_karma) +\n ' | Comment: ' + str(u.comment_karma))\n\n bot.say(message)\n\n\n# If you change the groups here, you'll have to change some things above.\n@rule('.*%s.*' % user_url)\ndef auto_redditor_info(bot, trigger):\n redditor_info(bot, trigger)\n\n\n@require_chanmsg('.setsfw is only permitted in channels')\n@commands('setsafeforwork', 'setsfw')\n@example('.setsfw true')\n@example('.setsfw false')\ndef update_channel(bot, trigger):\n \"\"\"\n Sets the Safe for Work status (true or false) for the current\n channel. Defaults to false.\n \"\"\"\n if bot.channels[trigger.sender].privileges[trigger.nick] < OP:\n return\n else:\n param = 'true'\n if trigger.group(2) and trigger.group(3):\n param = trigger.group(3).strip().lower()\n sfw = param == 'true'\n bot.db.set_channel_value(trigger.sender, 'sfw', sfw)\n if sfw:\n bot.reply('Got it. %s is now flagged as SFW.' % trigger.sender)\n else:\n bot.reply('Got it. %s is now flagged as NSFW.' % trigger.sender)\n\n\n@commands('getsafeforwork', 'getsfw')\n@example('.getsfw [channel]')\ndef get_channel_sfw(bot, trigger):\n \"\"\"\n Gets the preferred channel's Safe for Work status, or the current\n channel's status if no channel given.\n \"\"\"\n channel = trigger.group(2)\n if not channel:\n channel = trigger.sender\n if channel.is_nick():\n return bot.say('.getsfw with no channel param is only permitted in channels')\n\n channel = channel.strip()\n\n sfw = bot.db.get_channel_value(channel, 'sfw')\n if sfw:\n bot.say('%s is flagged as SFW' % channel)\n else:\n bot.say('%s is flagged as NSFW' % channel)\n", "path": "sopel/modules/reddit.py"}]}
| 3,045 | 492 |
gh_patches_debug_11964 | rasdani/github-patches | git_diff | urllib3__urllib3-1439 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'Failed to parse headers' warning logged when getting message/rfc822 content
I've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it:
```py
class TestOkayHeaders(SocketDummyServerTestCase):

    def _test_okay_header_parsing(self, header):
        self.start_response_handler((
            b'HTTP/1.1 200 OK\r\n'
            b'Content-Length: 0\r\n'
        ) + header + b'\r\n\r\n'
        )

        pool = HTTPConnectionPool(self.host, self.port, retries=False)
        self.addCleanup(pool.close)

        with LogRecorder() as logs:
            pool.request('GET', '/')

        for record in logs:
            assert 'Failed to parse headers' not in record.msg

    def test_header_text_plain(self):
        self._test_okay_header_parsing(b'Content-type: text/plain')

    def test_header_message_rfc822(self):
        self._test_okay_header_parsing(b'Content-type: message/rfc822')
```
The test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged:
```py
Failed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]
Traceback (most recent call last):
File "/home/user/git/urllib3/src/urllib3/connectionpool.py", line 396, in _make_request
assert_header_parsing(httplib_response.msg)
File "/home/user/git/urllib3/src/urllib3/util/response.py", line 68, in assert_header_parsing
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
urllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]
```
While retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.
</issue>
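For reference, here is a standalone sketch of why the old check misfires, using only the standard-library `email` parser rather than the repository's test harness (the header block below mirrors the failing test and is otherwise an assumption): for a `message/rfc822` content type the parsed message carries a nested message object as its payload, so `get_payload()` is truthy even though the headers parsed cleanly.

```py
import email

# Headers equivalent to the response served in the failing test above.
headers = email.message_from_string(
    "Content-Length: 0\r\nContent-Type: message/rfc822\r\n\r\n"
)

print(headers.defects)         # expected: [] - nothing actually failed to parse
print(headers.is_multipart())  # True - the payload is a list holding a nested message
print(headers.get_payload())   # [<email.message.Message ...>] - truthy, which is what
                               # trips the old `if defects or unparsed_data` branch
```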
<code>
[start of src/urllib3/util/response.py]
1 from __future__ import absolute_import
2 from ..packages.six.moves import http_client as httplib
3
4 from ..exceptions import HeaderParsingError
5
6
7 def is_fp_closed(obj):
8 """
9 Checks whether a given file-like object is closed.
10
11 :param obj:
12 The file-like object to check.
13 """
14
15 try:
16 # Check `isclosed()` first, in case Python3 doesn't set `closed`.
17 # GH Issue #928
18 return obj.isclosed()
19 except AttributeError:
20 pass
21
22 try:
23 # Check via the official file-like-object way.
24 return obj.closed
25 except AttributeError:
26 pass
27
28 try:
29 # Check if the object is a container for another file-like object that
30 # gets released on exhaustion (e.g. HTTPResponse).
31 return obj.fp is None
32 except AttributeError:
33 pass
34
35 raise ValueError("Unable to determine whether fp is closed.")
36
37
38 def assert_header_parsing(headers):
39 """
40 Asserts whether all headers have been successfully parsed.
41 Extracts encountered errors from the result of parsing headers.
42
43 Only works on Python 3.
44
45 :param headers: Headers to verify.
46 :type headers: `httplib.HTTPMessage`.
47
48 :raises urllib3.exceptions.HeaderParsingError:
49 If parsing errors are found.
50 """
51
52 # This will fail silently if we pass in the wrong kind of parameter.
53 # To make debugging easier add an explicit check.
54 if not isinstance(headers, httplib.HTTPMessage):
55 raise TypeError('expected httplib.Message, got {0}.'.format(
56 type(headers)))
57
58 defects = getattr(headers, 'defects', None)
59 get_payload = getattr(headers, 'get_payload', None)
60
61 unparsed_data = None
62 if get_payload: # Platform-specific: Python 3.
63 unparsed_data = get_payload()
64
65 if defects or unparsed_data:
66 raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
67
68
69 def is_response_to_head(response):
70 """
71 Checks whether the request of a response has been a HEAD-request.
72 Handles the quirks of AppEngine.
73
74 :param conn:
75 :type conn: :class:`httplib.HTTPResponse`
76 """
77 # FIXME: Can we do this somehow without accessing private httplib _method?
78 method = response._method
79 if isinstance(method, int): # Platform-specific: Appengine
80 return method == 3
81 return method.upper() == 'HEAD'
82
[end of src/urllib3/util/response.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py
--- a/src/urllib3/util/response.py
+++ b/src/urllib3/util/response.py
@@ -59,8 +59,14 @@
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
- if get_payload: # Platform-specific: Python 3.
- unparsed_data = get_payload()
+ if get_payload:
+ # get_payload is actually email.message.Message.get_payload;
+ # we're only interested in the result if it's not a multipart message
+ if not headers.is_multipart():
+ payload = get_payload()
+
+ if isinstance(payload, (bytes, str)):
+ unparsed_data = payload
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
|
{"golden_diff": "diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py\n--- a/src/urllib3/util/response.py\n+++ b/src/urllib3/util/response.py\n@@ -59,8 +59,14 @@\n get_payload = getattr(headers, 'get_payload', None)\n \n unparsed_data = None\n- if get_payload: # Platform-specific: Python 3.\n- unparsed_data = get_payload()\n+ if get_payload:\n+ # get_payload is actually email.message.Message.get_payload;\n+ # we're only interested in the result if it's not a multipart message\n+ if not headers.is_multipart():\n+ payload = get_payload()\n+\n+ if isinstance(payload, (bytes, str)):\n+ unparsed_data = payload\n \n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n", "issue": "'Failed to parse headers' warning logged when getting message/rfc822 content\nI've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it:\r\n\r\n```py\r\nclass TestOkayHeaders(SocketDummyServerTestCase):\r\n\r\n def _test_okay_header_parsing(self, header):\r\n self.start_response_handler((\r\n b'HTTP/1.1 200 OK\\r\\n'\r\n b'Content-Length: 0\\r\\n'\r\n ) + header + b'\\r\\n\\r\\n'\r\n )\r\n\r\n pool = HTTPConnectionPool(self.host, self.port, retries=False)\r\n self.addCleanup(pool.close)\r\n\r\n with LogRecorder() as logs:\r\n pool.request('GET', '/')\r\n\r\n for record in logs:\r\n assert 'Failed to parse headers' not in record.msg\r\n\r\n def test_header_text_plain(self):\r\n self._test_okay_header_parsing(b'Content-type: text/plain')\r\n\r\n def test_header_message_rfc822(self):\r\n self._test_okay_header_parsing(b'Content-type: message/rfc822')\r\n```\r\n\r\nThe test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged:\r\n\r\n```py\r\nFailed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\nTraceback (most recent call last):\r\n File \"/home/user/git/urllib3/src/urllib3/connectionpool.py\", line 396, in _make_request\r\n assert_header_parsing(httplib_response.msg)\r\n File \"/home/user/git/urllib3/src/urllib3/util/response.py\", line 68, in assert_header_parsing\r\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\r\nurllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\n```\r\n\r\nWhile retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. 
HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError('expected httplib.Message, got {0}.'.format(\n type(headers)))\n\n defects = getattr(headers, 'defects', None)\n get_payload = getattr(headers, 'get_payload', None)\n\n unparsed_data = None\n if get_payload: # Platform-specific: Python 3.\n unparsed_data = get_payload()\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == 'HEAD'\n", "path": "src/urllib3/util/response.py"}]}
| 1,754 | 204 |
gh_patches_debug_26233 | rasdani/github-patches | git_diff | cupy__cupy-5828 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unexpected NaN when using big-endian arrays
When a big-endian array is loaded on the GPU using cp.array(), random NaNs appear in the data and calculations will start returning NaN. No errors or warnings are given to the user.
### Conditions
CuPy Version : 7.6.0
CUDA Root : /usr/local/cuda
CUDA Build Version : 9010
CUDA Driver Version : 10010
CUDA Runtime Version : 9010
cuBLAS Version : 9010
cuFFT Version : 9010
cuRAND Version : 9010
cuSOLVER Version : (9, 1, 0)
cuSPARSE Version : 9010
NVRTC Version : (9, 1)
cuDNN Build Version : 7102
cuDNN Version : 7102
NCCL Build Version : 2115
NCCL Runtime Version : (unknown)
CUB Version : None
cuTENSOR Version : None
### Code to reproduce
```
import cupy as cp
import numpy as np
data = np.arange(1000*1000, dtype='>f4')/1e9
print(' numpy:', type(data), data.shape, data.dtype)
print(' nan:', np.where(np.isnan(data)))
print(' total:', np.sum(data))
arr = cp.array(data)
print('-----')
print(' cupy:' , type(arr), arr.shape, arr.dtype)
print(' nan:' , cp.where(cp.isnan(arr)))
print(' total:', cp.sum(arr))
```
### Output of the above code:
```
numpy: <class 'numpy.ndarray'> (1000000,) >f4
nan: (array([], dtype=int64),)
total: 499.99963
-----
cupy: <class 'cupy.core.core.ndarray'> (1000000,) >f4
nan: (array([ 213, 385, 426, ..., 999227, 999242, 999391]),)
total: nan
```
The numpy array shows no NaNs as expected, while the cupy array on the GPU shows several NaNs and functions like sum() that operate on the whole array return NaN as well.
### Scenarios
A fairly common occurence in a scientific environment is when readings FITS files, which store data big-endian, for example using the astropy.io module:
```
import cupy as cp
from astropy.io import fits
data = fits.getdata(filename)
gpu_data = cp.array(data) # Results in NaN
```
A workaround is to convert the array to little endian before using it with cupy:
`data = data.astype(np.float32)`
</issue>
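A small defensive sketch of the workaround described above (a hypothetical helper, not part of CuPy, and only needed on CuPy versions that do not byteswap automatically): convert any non-native-endian NumPy input to native byte order before transferring it.

```py
import numpy as np
import cupy as cp

def to_device_native(a: np.ndarray) -> cp.ndarray:
    """Copy `a` to the GPU, byteswapping non-native (e.g. '>f4') input first."""
    if not a.dtype.isnative:
        a = a.astype(a.dtype.newbyteorder('='))  # copy into native byte order
    return cp.array(a)

data = (np.arange(1000 * 1000) / 1e9).astype('>f4')
arr = to_device_native(data)
assert not cp.isnan(arr).any()  # no spurious NaNs after the conversion
```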
<code>
[start of cupy/_creation/from_data.py]
1 import numpy
2
3 from cupy import _core
4 from cupy._core import fusion
5
6
7 def array(obj, dtype=None, copy=True, order='K', subok=False, ndmin=0):
8 """Creates an array on the current device.
9
10 This function currently does not support the ``subok`` option.
11
12 Args:
13 obj: :class:`cupy.ndarray` object or any other object that can be
14 passed to :func:`numpy.array`.
15 dtype: Data type specifier.
16 copy (bool): If ``False``, this function returns ``obj`` if possible.
17 Otherwise this function always returns a new array.
18 order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major
19 (Fortran-style) order.
20 When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major
21 and uses ``'C'`` otherwise.
22 And when ``order`` is ``'K'``, it keeps strides as closely as
23 possible.
24 If ``obj`` is :class:`numpy.ndarray`, the function returns ``'C'``
25 or ``'F'`` order array.
26 subok (bool): If ``True``, then sub-classes will be passed-through,
27 otherwise the returned array will be forced to be a base-class
28 array (default).
29 ndmin (int): Minimum number of dimensions. Ones are inserted to the
30 head of the shape if needed.
31
32 Returns:
33 cupy.ndarray: An array on the current device.
34
35 .. note::
36 This method currently does not support ``subok`` argument.
37
38 .. seealso:: :func:`numpy.array`
39
40 """
41 return _core.array(obj, dtype, copy, order, subok, ndmin)
42
43
44 def asarray(a, dtype=None, order=None):
45 """Converts an object to array.
46
47 This is equivalent to ``array(a, dtype, copy=False)``.
48 This function currently does not support the ``order`` option.
49
50 Args:
51 a: The source object.
52 dtype: Data type specifier. It is inferred from the input by default.
53 order ({'C', 'F'}):
54 Whether to use row-major (C-style) or column-major (Fortran-style)
55 memory representation. Defaults to ``'C'``. ``order`` is ignored
56 for objects that are not :class:`cupy.ndarray`, but have the
57 ``__cuda_array_interface__`` attribute.
58
59 Returns:
60 cupy.ndarray: An array on the current device. If ``a`` is already on
61 the device, no copy is performed.
62
63 .. seealso:: :func:`numpy.asarray`
64
65 """
66 return _core.array(a, dtype, False, order)
67
68
69 def asanyarray(a, dtype=None, order=None):
70 """Converts an object to array.
71
72 This is currently equivalent to :func:`cupy.asarray`, since there is no
73 subclass of :class:`cupy.ndarray` in CuPy. Note that the original
74 :func:`numpy.asanyarray` returns the input array as is if it is an instance
75 of a subtype of :class:`numpy.ndarray`.
76
77 .. seealso:: :func:`cupy.asarray`, :func:`numpy.asanyarray`
78
79 """
80 return _core.array(a, dtype, False, order)
81
82
83 def ascontiguousarray(a, dtype=None):
84 """Returns a C-contiguous array.
85
86 Args:
87 a (cupy.ndarray): Source array.
88 dtype: Data type specifier.
89
90 Returns:
91 cupy.ndarray: If no copy is required, it returns ``a``. Otherwise, it
92 returns a copy of ``a``.
93
94 .. seealso:: :func:`numpy.ascontiguousarray`
95
96 """
97 return _core.ascontiguousarray(a, dtype)
98
99
100 # TODO(okuta): Implement asmatrix
101
102
103 def copy(a, order='K'):
104 """Creates a copy of a given array on the current device.
105
106 This function allocates the new array on the current device. If the given
107 array is allocated on the different device, then this function tries to
108 copy the contents over the devices.
109
110 Args:
111 a (cupy.ndarray): The source array.
112 order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major
113 (Fortran-style) order.
114 When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major
115 and uses ``'C'`` otherwise.
116 And when ``order`` is ``'K'``, it keeps strides as closely as
117 possible.
118
119 Returns:
120 cupy.ndarray: The copy of ``a`` on the current device.
121
122 .. seealso:: :func:`numpy.copy`, :meth:`cupy.ndarray.copy`
123
124 """
125 if fusion._is_fusing():
126 if order != 'K':
127 raise NotImplementedError(
128 'cupy.copy does not support `order` in fusion yet.')
129 return fusion._call_ufunc(_core.elementwise_copy, a)
130
131 # If the current device is different from the device of ``a``, then this
132 # function allocates a new array on the current device, and copies the
133 # contents over the devices.
134 return a.copy(order=order)
135
136
137 # TODO(okuta): Implement frombuffer
138
139
140 def fromfile(*args, **kwargs):
141 """Reads an array from a file.
142
143 .. note::
144 Uses NumPy's ``fromfile`` and coerces the result to a CuPy array.
145
146 .. seealso:: :func:`numpy.fromfile`
147
148 """
149 return asarray(numpy.fromfile(*args, **kwargs))
150
151
152 # TODO(okuta): Implement fromfunction
153
154
155 # TODO(okuta): Implement fromiter
156
157
158 # TODO(okuta): Implement fromstring
159
160
161 # TODO(okuta): Implement loadtxt
162
[end of cupy/_creation/from_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cupy/_creation/from_data.py b/cupy/_creation/from_data.py
--- a/cupy/_creation/from_data.py
+++ b/cupy/_creation/from_data.py
@@ -35,6 +35,11 @@
.. note::
This method currently does not support ``subok`` argument.
+ .. note::
+ If ``obj`` is an `numpy.ndarray` instance that contains big-endian data,
+ this function automatically swaps its byte order to little-endian,
+ which is the NVIDIA and AMD GPU architecture's native use.
+
.. seealso:: :func:`numpy.array`
"""
@@ -60,6 +65,11 @@
cupy.ndarray: An array on the current device. If ``a`` is already on
the device, no copy is performed.
+ .. note::
+ If ``a`` is an `numpy.ndarray` instance that contains big-endian data,
+ this function automatically swaps its byte order to little-endian,
+ which is the NVIDIA and AMD GPU architecture's native use.
+
.. seealso:: :func:`numpy.asarray`
"""
@@ -143,6 +153,11 @@
.. note::
Uses NumPy's ``fromfile`` and coerces the result to a CuPy array.
+ .. note::
+ If you let NumPy's ``fromfile`` read the file in big-endian, CuPy
+ automatically swaps its byte order to little-endian, which is the NVIDIA
+ and AMD GPU architecture's native use.
+
.. seealso:: :func:`numpy.fromfile`
"""
|
{"golden_diff": "diff --git a/cupy/_creation/from_data.py b/cupy/_creation/from_data.py\n--- a/cupy/_creation/from_data.py\n+++ b/cupy/_creation/from_data.py\n@@ -35,6 +35,11 @@\n .. note::\n This method currently does not support ``subok`` argument.\n \n+ .. note::\n+ If ``obj`` is an `numpy.ndarray` instance that contains big-endian data,\n+ this function automatically swaps its byte order to little-endian,\n+ which is the NVIDIA and AMD GPU architecture's native use.\n+\n .. seealso:: :func:`numpy.array`\n \n \"\"\"\n@@ -60,6 +65,11 @@\n cupy.ndarray: An array on the current device. If ``a`` is already on\n the device, no copy is performed.\n \n+ .. note::\n+ If ``a`` is an `numpy.ndarray` instance that contains big-endian data,\n+ this function automatically swaps its byte order to little-endian,\n+ which is the NVIDIA and AMD GPU architecture's native use.\n+\n .. seealso:: :func:`numpy.asarray`\n \n \"\"\"\n@@ -143,6 +153,11 @@\n .. note::\n Uses NumPy's ``fromfile`` and coerces the result to a CuPy array.\n \n+ .. note::\n+ If you let NumPy's ``fromfile`` read the file in big-endian, CuPy\n+ automatically swaps its byte order to little-endian, which is the NVIDIA\n+ and AMD GPU architecture's native use.\n+\n .. seealso:: :func:`numpy.fromfile`\n \n \"\"\"\n", "issue": "Unexpected NaN when using big-endian arrays\nWhen a big-endian array is loaded on the GPU using cp.array(), random NaNs appear in the data and calculations will start returning NaN. No errors or warnings are given to the user.\r\n\r\n\r\n### Conditions\r\nCuPy Version : 7.6.0\r\nCUDA Root : /usr/local/cuda\r\nCUDA Build Version : 9010\r\nCUDA Driver Version : 10010\r\nCUDA Runtime Version : 9010\r\ncuBLAS Version : 9010\r\ncuFFT Version : 9010\r\ncuRAND Version : 9010\r\ncuSOLVER Version : (9, 1, 0)\r\ncuSPARSE Version : 9010\r\nNVRTC Version : (9, 1)\r\ncuDNN Build Version : 7102\r\ncuDNN Version : 7102\r\nNCCL Build Version : 2115\r\nNCCL Runtime Version : (unknown)\r\nCUB Version : None\r\ncuTENSOR Version : None\r\n\r\n### Code to reproduce\r\n```import cupy as cp\r\nimport numpy as np\r\n\r\ndata = np.arange(1000*1000, dtype='>f4')/1e9\r\nprint(' numpy:', type(data), data.shape, data.dtype)\r\nprint(' nan:', np.where(np.isnan(data)))\r\nprint(' total:', np.sum(data))\r\n\r\narr = cp.array(data)\r\nprint('-----')\r\nprint(' cupy:' , type(arr), arr.shape, arr.dtype)\r\nprint(' nan:' , cp.where(cp.isnan(arr)))\r\nprint(' total:', cp.sum(arr))`\r\n```\r\n\r\n### Output of the above code:\r\n\r\n```\r\n numpy: <class 'numpy.ndarray'> (1000000,) >f4\r\n nan: (array([], dtype=int64),)\r\n total: 499.99963\r\n-----\r\n cupy: <class 'cupy.core.core.ndarray'> (1000000,) >f4\r\n nan: (array([ 213, 385, 426, ..., 999227, 999242, 999391]),)\r\n total: nan\r\n```\r\n\r\nThe numpy array shows no NaNs as expected, while the cupy array on the GPU shows several NaNs and functions like sum() that operate on the whole array return NaN as well.\r\n\r\n### Scenarios\r\n\r\nA fairly common occurence in a scientific environment is when readings FITS files, which store data big-endian, for example using the astropy.io module:\r\n\r\n```\r\nimport cupy as cp\r\nfrom astropy.io import fits\r\n\r\ndata = fits.getdata(filename)\r\ngpu_data = cp.array(data) # Results in NaN\r\n```\r\n\r\nA workaround is to convert the array to little endian before using it with cupy:\r\n\r\n`data = data.astype(np.float32)`\r\n\r\n\n", "before_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import fusion\n\n\ndef array(obj, 
dtype=None, copy=True, order='K', subok=False, ndmin=0):\n \"\"\"Creates an array on the current device.\n\n This function currently does not support the ``subok`` option.\n\n Args:\n obj: :class:`cupy.ndarray` object or any other object that can be\n passed to :func:`numpy.array`.\n dtype: Data type specifier.\n copy (bool): If ``False``, this function returns ``obj`` if possible.\n Otherwise this function always returns a new array.\n order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major\n and uses ``'C'`` otherwise.\n And when ``order`` is ``'K'``, it keeps strides as closely as\n possible.\n If ``obj`` is :class:`numpy.ndarray`, the function returns ``'C'``\n or ``'F'`` order array.\n subok (bool): If ``True``, then sub-classes will be passed-through,\n otherwise the returned array will be forced to be a base-class\n array (default).\n ndmin (int): Minimum number of dimensions. Ones are inserted to the\n head of the shape if needed.\n\n Returns:\n cupy.ndarray: An array on the current device.\n\n .. note::\n This method currently does not support ``subok`` argument.\n\n .. seealso:: :func:`numpy.array`\n\n \"\"\"\n return _core.array(obj, dtype, copy, order, subok, ndmin)\n\n\ndef asarray(a, dtype=None, order=None):\n \"\"\"Converts an object to array.\n\n This is equivalent to ``array(a, dtype, copy=False)``.\n This function currently does not support the ``order`` option.\n\n Args:\n a: The source object.\n dtype: Data type specifier. It is inferred from the input by default.\n order ({'C', 'F'}):\n Whether to use row-major (C-style) or column-major (Fortran-style)\n memory representation. Defaults to ``'C'``. ``order`` is ignored\n for objects that are not :class:`cupy.ndarray`, but have the\n ``__cuda_array_interface__`` attribute.\n\n Returns:\n cupy.ndarray: An array on the current device. If ``a`` is already on\n the device, no copy is performed.\n\n .. seealso:: :func:`numpy.asarray`\n\n \"\"\"\n return _core.array(a, dtype, False, order)\n\n\ndef asanyarray(a, dtype=None, order=None):\n \"\"\"Converts an object to array.\n\n This is currently equivalent to :func:`cupy.asarray`, since there is no\n subclass of :class:`cupy.ndarray` in CuPy. Note that the original\n :func:`numpy.asanyarray` returns the input array as is if it is an instance\n of a subtype of :class:`numpy.ndarray`.\n\n .. seealso:: :func:`cupy.asarray`, :func:`numpy.asanyarray`\n\n \"\"\"\n return _core.array(a, dtype, False, order)\n\n\ndef ascontiguousarray(a, dtype=None):\n \"\"\"Returns a C-contiguous array.\n\n Args:\n a (cupy.ndarray): Source array.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: If no copy is required, it returns ``a``. Otherwise, it\n returns a copy of ``a``.\n\n .. seealso:: :func:`numpy.ascontiguousarray`\n\n \"\"\"\n return _core.ascontiguousarray(a, dtype)\n\n\n# TODO(okuta): Implement asmatrix\n\n\ndef copy(a, order='K'):\n \"\"\"Creates a copy of a given array on the current device.\n\n This function allocates the new array on the current device. 
If the given\n array is allocated on the different device, then this function tries to\n copy the contents over the devices.\n\n Args:\n a (cupy.ndarray): The source array.\n order ({'C', 'F', 'A', 'K'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n When ``order`` is ``'A'``, it uses ``'F'`` if ``a`` is column-major\n and uses ``'C'`` otherwise.\n And when ``order`` is ``'K'``, it keeps strides as closely as\n possible.\n\n Returns:\n cupy.ndarray: The copy of ``a`` on the current device.\n\n .. seealso:: :func:`numpy.copy`, :meth:`cupy.ndarray.copy`\n\n \"\"\"\n if fusion._is_fusing():\n if order != 'K':\n raise NotImplementedError(\n 'cupy.copy does not support `order` in fusion yet.')\n return fusion._call_ufunc(_core.elementwise_copy, a)\n\n # If the current device is different from the device of ``a``, then this\n # function allocates a new array on the current device, and copies the\n # contents over the devices.\n return a.copy(order=order)\n\n\n# TODO(okuta): Implement frombuffer\n\n\ndef fromfile(*args, **kwargs):\n \"\"\"Reads an array from a file.\n\n .. note::\n Uses NumPy's ``fromfile`` and coerces the result to a CuPy array.\n\n .. seealso:: :func:`numpy.fromfile`\n\n \"\"\"\n return asarray(numpy.fromfile(*args, **kwargs))\n\n\n# TODO(okuta): Implement fromfunction\n\n\n# TODO(okuta): Implement fromiter\n\n\n# TODO(okuta): Implement fromstring\n\n\n# TODO(okuta): Implement loadtxt\n", "path": "cupy/_creation/from_data.py"}]}
| 2,859 | 361 |
gh_patches_debug_2822 | rasdani/github-patches | git_diff | StackStorm__st2-5092 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove obsolete 'bin/st2-check-license'
The project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core.
The task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts.
This is an easy `good first issue` for someone willing to start contributing and exploring the st2 system.
Bonus points to find other obsolete, outdated, irrelevant scripts in st2 core.
Help wanted!
</issue>
<code>
[start of st2common/setup.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2020 The StackStorm Authors.
3 # Copyright 2019 Extreme Networks, Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from __future__ import absolute_import
18 import os.path
19
20 from setuptools import setup, find_packages
21
22 from dist_utils import fetch_requirements
23 from dist_utils import apply_vagrant_workaround
24 from dist_utils import get_version_string
25
26 ST2_COMPONENT = 'st2common'
27 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
28 REQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')
29 INIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')
30
31 install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
32
33 apply_vagrant_workaround()
34 setup(
35 name=ST2_COMPONENT,
36 version=get_version_string(INIT_FILE),
37 description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),
38 author='StackStorm',
39 author_email='[email protected]',
40 license='Apache License (2.0)',
41 url='https://stackstorm.com/',
42 install_requires=install_reqs,
43 dependency_links=dep_links,
44 test_suite=ST2_COMPONENT,
45 zip_safe=False,
46 include_package_data=True,
47 packages=find_packages(exclude=['setuptools', 'tests']),
48 scripts=[
49 'bin/st2-bootstrap-rmq',
50 'bin/st2-cleanup-db',
51 'bin/st2-register-content',
52 'bin/st2-purge-executions',
53 'bin/st2-purge-trigger-instances',
54 'bin/st2-run-pack-tests',
55 'bin/st2ctl',
56 'bin/st2-generate-symmetric-crypto-key',
57 'bin/st2-self-check',
58 'bin/st2-track-result',
59 'bin/st2-validate-pack-config',
60 'bin/st2-check-license',
61 'bin/st2-pack-install',
62 'bin/st2-pack-download',
63 'bin/st2-pack-setup-virtualenv'
64 ],
65 entry_points={
66 'st2common.metrics.driver': [
67 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',
68 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',
69 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'
70 ],
71 'st2common.rbac.backend': [
72 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'
73 ],
74 }
75 )
76
[end of st2common/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2common/setup.py b/st2common/setup.py
--- a/st2common/setup.py
+++ b/st2common/setup.py
@@ -57,7 +57,6 @@
'bin/st2-self-check',
'bin/st2-track-result',
'bin/st2-validate-pack-config',
- 'bin/st2-check-license',
'bin/st2-pack-install',
'bin/st2-pack-download',
'bin/st2-pack-setup-virtualenv'
|
{"golden_diff": "diff --git a/st2common/setup.py b/st2common/setup.py\n--- a/st2common/setup.py\n+++ b/st2common/setup.py\n@@ -57,7 +57,6 @@\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n- 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n", "issue": "Remove obsolete 'bin/st2-check-license'\nThe project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core.\r\n\r\nThe task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts.\r\n\r\nThis is an easy `good first issue` for someone willing to start contributing and exploring the st2 system.\r\nBonus points to find other obsolete, outdated, irrelevant scripts in st2 core.\r\nHelp wanted!\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py"}]}
| 1,444 | 105 |
gh_patches_debug_743 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-3148 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
clarification on `note`
https://hypothesis.readthedocs.io/en/latest/details.html#hypothesis.note states
`Report this value in the final execution.`
From my test, `note` wasn't printed on a successful run but was printed on a falsified run.
Please help me understand this functionality
</issue>
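For context, a short sketch of the behavior being asked about (default settings assumed; the test below fails on purpose so the report is triggered):

```py
from hypothesis import Verbosity, given, note, settings
import hypothesis.strategies as st

@settings(verbosity=Verbosity.normal)  # the default; Verbosity.verbose prints notes for every example
@given(st.integers())
def test_note_is_reported_for_the_failing_example(x):
    note(f"tried x={x}")  # silent while examples keep passing
    assert x < 100        # once this fails, the note for the shrunk (final) example is printed
```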
<code>
[start of hypothesis-python/src/hypothesis/control.py]
1 # This file is part of Hypothesis, which may be found at
2 # https://github.com/HypothesisWorks/hypothesis/
3 #
4 # Most of this work is copyright (C) 2013-2021 David R. MacIver
5 # ([email protected]), but it contains contributions by others. See
6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
7 # consult the git log if you need to determine who owns an individual
8 # contribution.
9 #
10 # This Source Code Form is subject to the terms of the Mozilla Public License,
11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
12 # obtain one at https://mozilla.org/MPL/2.0/.
13 #
14 # END HEADER
15
16 import math
17 import traceback
18 from typing import NoReturn, Union
19
20 from hypothesis import Verbosity, settings
21 from hypothesis.errors import CleanupFailed, InvalidArgument, UnsatisfiedAssumption
22 from hypothesis.internal.conjecture.data import ConjectureData
23 from hypothesis.internal.validation import check_type
24 from hypothesis.reporting import report, verbose_report
25 from hypothesis.utils.dynamicvariables import DynamicVariable
26
27
28 def reject() -> NoReturn:
29 raise UnsatisfiedAssumption()
30
31
32 def assume(condition: object) -> bool:
33 """Calling ``assume`` is like an :ref:`assert <python:assert>` that marks
34 the example as bad, rather than failing the test.
35
36 This allows you to specify properties that you *assume* will be
37 true, and let Hypothesis try to avoid similar examples in future.
38 """
39 if not condition:
40 raise UnsatisfiedAssumption()
41 return True
42
43
44 _current_build_context = DynamicVariable(None)
45
46
47 def currently_in_test_context() -> bool:
48 """Return ``True`` if the calling code is currently running inside an
49 :func:`@given <hypothesis.given>` or :doc:`stateful <stateful>` test,
50 ``False`` otherwise.
51
52 This is useful for third-party integrations and assertion helpers which
53 may be called from traditional or property-based tests, but can only use
54 :func:`~hypothesis.assume` or :func:`~hypothesis.target` in the latter case.
55 """
56 return _current_build_context.value is not None
57
58
59 def current_build_context():
60 context = _current_build_context.value
61 if context is None:
62 raise InvalidArgument("No build context registered")
63 return context
64
65
66 class BuildContext:
67 def __init__(self, data, is_final=False, close_on_capture=True):
68 assert isinstance(data, ConjectureData)
69 self.data = data
70 self.tasks = []
71 self.is_final = is_final
72 self.close_on_capture = close_on_capture
73 self.close_on_del = False
74
75 def __enter__(self):
76 self.assign_variable = _current_build_context.with_value(self)
77 self.assign_variable.__enter__()
78 return self
79
80 def __exit__(self, exc_type, exc_value, tb):
81 self.assign_variable.__exit__(exc_type, exc_value, tb)
82 if self.close() and exc_type is None:
83 raise CleanupFailed()
84
85 def close(self):
86 any_failed = False
87 for task in self.tasks:
88 try:
89 task()
90 except BaseException:
91 any_failed = True
92 report(traceback.format_exc())
93 return any_failed
94
95
96 def cleanup(teardown):
97 """Register a function to be called when the current test has finished
98 executing. Any exceptions thrown in teardown will be printed but not
99 rethrown.
100
101 Inside a test this isn't very interesting, because you can just use
102 a finally block, but note that you can use this inside map, flatmap,
103 etc. in order to e.g. insist that a value is closed at the end.
104 """
105 context = _current_build_context.value
106 if context is None:
107 raise InvalidArgument("Cannot register cleanup outside of build context")
108 context.tasks.append(teardown)
109
110
111 def should_note():
112 context = _current_build_context.value
113 if context is None:
114 raise InvalidArgument("Cannot make notes outside of a test")
115 return context.is_final or settings.default.verbosity >= Verbosity.verbose
116
117
118 def note(value: str) -> None:
119 """Report this value in the final execution."""
120 if should_note():
121 report(value)
122
123
124 def event(value: str) -> None:
125 """Record an event that occurred this test. Statistics on number of test
126 runs with each event will be reported at the end if you run Hypothesis in
127 statistics reporting mode.
128
129 Events should be strings or convertible to them.
130 """
131 context = _current_build_context.value
132 if context is None:
133 raise InvalidArgument("Cannot make record events outside of a test")
134
135 context.data.note_event(value)
136
137
138 def target(observation: Union[int, float], *, label: str = "") -> Union[int, float]:
139 """Calling this function with an ``int`` or ``float`` observation gives it feedback
140 with which to guide our search for inputs that will cause an error, in
141 addition to all the usual heuristics. Observations must always be finite.
142
143 Hypothesis will try to maximize the observed value over several examples;
144 almost any metric will work so long as it makes sense to increase it.
145 For example, ``-abs(error)`` is a metric that increases as ``error``
146 approaches zero.
147
148 Example metrics:
149
150 - Number of elements in a collection, or tasks in a queue
151 - Mean or maximum runtime of a task (or both, if you use ``label``)
152 - Compression ratio for data (perhaps per-algorithm or per-level)
153 - Number of steps taken by a state machine
154
155 The optional ``label`` argument can be used to distinguish between
156 and therefore separately optimise distinct observations, such as the
157 mean and standard deviation of a dataset. It is an error to call
158 ``target()`` with any label more than once per test case.
159
160 .. note::
161 **The more examples you run, the better this technique works.**
162
163 As a rule of thumb, the targeting effect is noticeable above
164 :obj:`max_examples=1000 <hypothesis.settings.max_examples>`,
165 and immediately obvious by around ten thousand examples
166 *per label* used by your test.
167
168 :ref:`statistics` include the best score seen for each label,
169 which can help avoid `the threshold problem
170 <https://hypothesis.works/articles/threshold-problem/>`__ when the minimal
171 example shrinks right down to the threshold of failure (:issue:`2180`).
172 """
173 check_type((int, float), observation, "observation")
174 if not math.isfinite(observation):
175 raise InvalidArgument(f"observation={observation!r} must be a finite float.")
176 check_type(str, label, "label")
177
178 context = _current_build_context.value
179 if context is None:
180 raise InvalidArgument(
181 "Calling target() outside of a test is invalid. "
182 "Consider guarding this call with `if currently_in_test_context(): ...`"
183 )
184 verbose_report(f"Saw target(observation={observation!r}, label={label!r})")
185
186 if label in context.data.target_observations:
187 raise InvalidArgument(
188 f"Calling target({observation!r}, label={label!r}) would overwrite "
189 f"target({context.data.target_observations[label]!r}, label={label!r})"
190 )
191 else:
192 context.data.target_observations[label] = observation
193
194 return observation
195
[end of hypothesis-python/src/hypothesis/control.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py
--- a/hypothesis-python/src/hypothesis/control.py
+++ b/hypothesis-python/src/hypothesis/control.py
@@ -116,7 +116,7 @@
def note(value: str) -> None:
- """Report this value in the final execution."""
+ """Report this value for the minimal failing example."""
if should_note():
report(value)
|
{"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py\n--- a/hypothesis-python/src/hypothesis/control.py\n+++ b/hypothesis-python/src/hypothesis/control.py\n@@ -116,7 +116,7 @@\n \n \n def note(value: str) -> None:\n- \"\"\"Report this value in the final execution.\"\"\"\n+ \"\"\"Report this value for the minimal failing example.\"\"\"\n if should_note():\n report(value)\n", "issue": "clarification on `note`\nhttps://hypothesis.readthedocs.io/en/latest/details.html#hypothesis.note states\r\n\r\n`Report this value in the final execution.`\r\n\r\nFrom my test, `note` wasn't printed on successful run and was printed on falsified run.\r\n\r\nPlease help me understand this functionality\r\n\r\n\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2021 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport math\nimport traceback\nfrom typing import NoReturn, Union\n\nfrom hypothesis import Verbosity, settings\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, UnsatisfiedAssumption\nfrom hypothesis.internal.conjecture.data import ConjectureData\nfrom hypothesis.internal.validation import check_type\nfrom hypothesis.reporting import report, verbose_report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\n\ndef reject() -> NoReturn:\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition: object) -> bool:\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef currently_in_test_context() -> bool:\n \"\"\"Return ``True`` if the calling code is currently running inside an\n :func:`@given <hypothesis.given>` or :doc:`stateful <stateful>` test,\n ``False`` otherwise.\n\n This is useful for third-party integrations and assertion helpers which\n may be called from traditional or property-based tests, but can only use\n :func:`~hypothesis.assume` or :func:`~hypothesis.target` in the latter case.\n \"\"\"\n return _current_build_context.value is not None\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"No build context registered\")\n return context\n\n\nclass BuildContext:\n def __init__(self, data, is_final=False, close_on_capture=True):\n assert isinstance(data, ConjectureData)\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n\n def __enter__(self):\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, 
tb)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot register cleanup outside of build context\")\n context.tasks.append(teardown)\n\n\ndef should_note():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot make notes outside of a test\")\n return context.is_final or settings.default.verbosity >= Verbosity.verbose\n\n\ndef note(value: str) -> None:\n \"\"\"Report this value in the final execution.\"\"\"\n if should_note():\n report(value)\n\n\ndef event(value: str) -> None:\n \"\"\"Record an event that occurred this test. Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\"Cannot make record events outside of a test\")\n\n context.data.note_event(value)\n\n\ndef target(observation: Union[int, float], *, label: str = \"\") -> Union[int, float]:\n \"\"\"Calling this function with an ``int`` or ``float`` observation gives it feedback\n with which to guide our search for inputs that will cause an error, in\n addition to all the usual heuristics. Observations must always be finite.\n\n Hypothesis will try to maximize the observed value over several examples;\n almost any metric will work so long as it makes sense to increase it.\n For example, ``-abs(error)`` is a metric that increases as ``error``\n approaches zero.\n\n Example metrics:\n\n - Number of elements in a collection, or tasks in a queue\n - Mean or maximum runtime of a task (or both, if you use ``label``)\n - Compression ratio for data (perhaps per-algorithm or per-level)\n - Number of steps taken by a state machine\n\n The optional ``label`` argument can be used to distinguish between\n and therefore separately optimise distinct observations, such as the\n mean and standard deviation of a dataset. It is an error to call\n ``target()`` with any label more than once per test case.\n\n .. 
note::\n **The more examples you run, the better this technique works.**\n\n As a rule of thumb, the targeting effect is noticeable above\n :obj:`max_examples=1000 <hypothesis.settings.max_examples>`,\n and immediately obvious by around ten thousand examples\n *per label* used by your test.\n\n :ref:`statistics` include the best score seen for each label,\n which can help avoid `the threshold problem\n <https://hypothesis.works/articles/threshold-problem/>`__ when the minimal\n example shrinks right down to the threshold of failure (:issue:`2180`).\n \"\"\"\n check_type((int, float), observation, \"observation\")\n if not math.isfinite(observation):\n raise InvalidArgument(f\"observation={observation!r} must be a finite float.\")\n check_type(str, label, \"label\")\n\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n \"Calling target() outside of a test is invalid. \"\n \"Consider guarding this call with `if currently_in_test_context(): ...`\"\n )\n verbose_report(f\"Saw target(observation={observation!r}, label={label!r})\")\n\n if label in context.data.target_observations:\n raise InvalidArgument(\n f\"Calling target({observation!r}, label={label!r}) would overwrite \"\n f\"target({context.data.target_observations[label]!r}, label={label!r})\"\n )\n else:\n context.data.target_observations[label] = observation\n\n return observation\n", "path": "hypothesis-python/src/hypothesis/control.py"}]}
| 2,732 | 108 |
gh_patches_debug_27438
|
rasdani/github-patches
|
git_diff
|
huggingface__dataset-viewer-2389
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't ignore webhooks when a dataset changes visibility
See:
- https://github.com/huggingface/moon-landing/issues/8779
- https://github.com/huggingface/moon-landing/pull/8825
A webhook is sent when a dataset is toggled between public and private.
Currently, we ignore them due to
https://github.com/huggingface/datasets-server/blob/66c1e089e204ab33195b957e1b99b0da6a4dd2de/services/api/src/api/routes/webhook.py#L84-L89
Instead, I think we should delete+update the dataset
</issue>
<code>
[start of services/api/src/api/routes/webhook.py]
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3
4 import logging
5 from typing import Any, Literal, Optional, TypedDict
6
7 from jsonschema import ValidationError, validate
8 from libapi.utils import Endpoint, get_response
9 from libcommon.dtos import Priority
10 from libcommon.exceptions import CustomError
11 from libcommon.operations import delete_dataset, get_current_revision, update_dataset
12 from libcommon.prometheus import StepProfiler
13 from libcommon.storage_client import StorageClient
14 from starlette.requests import Request
15 from starlette.responses import Response
16
17 schema = {
18 "$schema": "https://json-schema.org/draft/2020-12/schema",
19 "type": "object",
20 "properties": {
21 "event": {"type": "string", "enum": ["add", "remove", "update", "move"]},
22 "movedTo": {"type": "string"},
23 "repo": {
24 "type": "object",
25 "properties": {
26 "headSha": {"type": "string"},
27 "name": {"type": "string"},
28 "type": {"type": "string", "enum": ["dataset", "model", "space"]},
29 },
30 "required": ["type", "name"],
31 },
32 "scope": {
33 "type": "string",
34 },
35 },
36 "required": ["event", "repo", "scope"],
37 }
38
39
40 class _MoonWebhookV2PayloadRepo(TypedDict):
41 type: Literal["model", "dataset", "space"]
42 name: str
43
44
45 class MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False):
46 headSha: Optional[str]
47
48
49 class MoonWebhookV2Payload(TypedDict):
50 """
51 Payload from a moon-landing webhook call, v2.
52 """
53
54 event: Literal["add", "remove", "update", "move"]
55 movedTo: Optional[str]
56 repo: MoonWebhookV2PayloadRepo
57 scope: str
58
59
60 def parse_payload(json: Any) -> MoonWebhookV2Payload:
61 validate(instance=json, schema=schema)
62 return json # type: ignore
63 # ^ validate() ensures the content is correct, but does not give the type
64
65
66 def process_payload(
67 payload: MoonWebhookV2Payload,
68 blocked_datasets: list[str],
69 hf_endpoint: str,
70 hf_token: Optional[str] = None,
71 hf_timeout_seconds: Optional[float] = None,
72 storage_clients: Optional[list[StorageClient]] = None,
73 ) -> None:
74 if payload["repo"]["type"] != "dataset" or payload["scope"] not in ("repo", "repo.content"):
75 # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos
76 return
77 dataset = payload["repo"]["name"]
78 if dataset is None:
79 return
80 event = payload["event"]
81 if event == "remove":
82 delete_dataset(dataset=dataset, storage_clients=storage_clients)
83 elif event in ["add", "update", "move"]:
84 if event == "update" and get_current_revision(dataset) == payload["repo"]["headSha"]:
85 # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated
86 logging.warning(
87 f"Webhook revision for {dataset} is the same as the current revision in the db - skipping update."
88 )
89 return
90 delete_dataset(dataset=dataset, storage_clients=storage_clients)
91 # ^ delete the old contents (cache + jobs + assets) to avoid mixed content
92 new_dataset = (event == "move" and payload["movedTo"]) or dataset
93 update_dataset(
94 dataset=new_dataset,
95 priority=Priority.NORMAL,
96 blocked_datasets=blocked_datasets,
97 hf_endpoint=hf_endpoint,
98 hf_token=hf_token,
99 hf_timeout_seconds=hf_timeout_seconds,
100 storage_clients=storage_clients,
101 )
102
103
104 def create_webhook_endpoint(
105 blocked_datasets: list[str],
106 hf_endpoint: str,
107 hf_token: Optional[str] = None,
108 hf_timeout_seconds: Optional[float] = None,
109 hf_webhook_secret: Optional[str] = None,
110 storage_clients: Optional[list[StorageClient]] = None,
111 ) -> Endpoint:
112 async def webhook_endpoint(request: Request) -> Response:
113 with StepProfiler(method="webhook_endpoint", step="all"):
114 with StepProfiler(method="webhook_endpoint", step="get JSON"):
115 try:
116 json = await request.json()
117 except Exception:
118 content = {"status": "error", "error": "the body could not be parsed as a JSON"}
119 logging.info("/webhook: the body could not be parsed as a JSON.")
120 return get_response(content, 400)
121 logging.info(f"/webhook: {json}")
122 with StepProfiler(method="webhook_endpoint", step="parse payload and headers"):
123 try:
124 payload = parse_payload(json)
125 except ValidationError as e:
126 content = {"status": "error", "error": "the JSON payload is invalid"}
127 logging.info(f"/webhook: the JSON body is invalid. JSON: {json}. Error: {e}")
128 return get_response(content, 400)
129 except Exception as e:
130 logging.exception("Unexpected error", exc_info=e)
131 content = {"status": "error", "error": "unexpected error"}
132 logging.warning(f"/webhook: unexpected error while parsing the JSON body is invalid. Error: {e}")
133 return get_response(content, 500)
134
135 HEADER = "x-webhook-secret"
136 trust_sender = (
137 hf_webhook_secret is not None
138 and (secret := request.headers.get(HEADER)) is not None
139 and secret == hf_webhook_secret
140 )
141 if not trust_sender:
142 logging.info(f"/webhook: the sender is not trusted. JSON: {json}")
143 return get_response(
144 {"status": "error", "error": "The sender is not trusted. Retry with a valid secret."}, 400
145 )
146
147 with StepProfiler(method="webhook_endpoint", step="process payload"):
148 try:
149 process_payload(
150 payload=payload,
151 blocked_datasets=blocked_datasets,
152 hf_endpoint=hf_endpoint,
153 hf_token=hf_token,
154 hf_timeout_seconds=hf_timeout_seconds,
155 storage_clients=storage_clients,
156 )
157 except CustomError as e:
158 content = {"status": "error", "error": "the dataset is not supported"}
159 dataset = payload["repo"]["name"]
160 logging.debug(f"/webhook: the dataset {dataset} is not supported. JSON: {json}. Error: {e}")
161 return get_response(content, 400)
162 content = {"status": "ok"}
163 return get_response(content, 200)
164
165 return webhook_endpoint
166
[end of services/api/src/api/routes/webhook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py
--- a/services/api/src/api/routes/webhook.py
+++ b/services/api/src/api/routes/webhook.py
@@ -71,7 +71,7 @@
hf_timeout_seconds: Optional[float] = None,
storage_clients: Optional[list[StorageClient]] = None,
) -> None:
- if payload["repo"]["type"] != "dataset" or payload["scope"] not in ("repo", "repo.content"):
+ if payload["repo"]["type"] != "dataset" or payload["scope"] not in ("repo", "repo.content", "repo.config"):
# ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos
return
dataset = payload["repo"]["name"]
@@ -81,7 +81,11 @@
if event == "remove":
delete_dataset(dataset=dataset, storage_clients=storage_clients)
elif event in ["add", "update", "move"]:
- if event == "update" and get_current_revision(dataset) == payload["repo"]["headSha"]:
+ if (
+ event == "update"
+ and get_current_revision(dataset) == payload["repo"]["headSha"]
+ and not payload["scope"] == "repo.config"
+ ):
# ^ it filters out the webhook calls when the refs/convert/parquet branch is updated
logging.warning(
f"Webhook revision for {dataset} is the same as the current revision in the db - skipping update."
|
{"golden_diff": "diff --git a/services/api/src/api/routes/webhook.py b/services/api/src/api/routes/webhook.py\n--- a/services/api/src/api/routes/webhook.py\n+++ b/services/api/src/api/routes/webhook.py\n@@ -71,7 +71,7 @@\n hf_timeout_seconds: Optional[float] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n ) -> None:\n- if payload[\"repo\"][\"type\"] != \"dataset\" or payload[\"scope\"] not in (\"repo\", \"repo.content\"):\n+ if payload[\"repo\"][\"type\"] != \"dataset\" or payload[\"scope\"] not in (\"repo\", \"repo.content\", \"repo.config\"):\n # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos\n return\n dataset = payload[\"repo\"][\"name\"]\n@@ -81,7 +81,11 @@\n if event == \"remove\":\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n elif event in [\"add\", \"update\", \"move\"]:\n- if event == \"update\" and get_current_revision(dataset) == payload[\"repo\"][\"headSha\"]:\n+ if (\n+ event == \"update\"\n+ and get_current_revision(dataset) == payload[\"repo\"][\"headSha\"]\n+ and not payload[\"scope\"] == \"repo.config\"\n+ ):\n # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated\n logging.warning(\n f\"Webhook revision for {dataset} is the same as the current revision in the db - skipping update.\"\n", "issue": "Don't ignore webhooks when a dataset changes visibility\nSee:\r\n- https://github.com/huggingface/moon-landing/issues/8779\r\n- https://github.com/huggingface/moon-landing/pull/8825\r\n\r\nA webhook is sent when a dataset is toggle between public and private.\r\n\r\nCurrently, we ignore them due to \r\n\r\nhttps://github.com/huggingface/datasets-server/blob/66c1e089e204ab33195b957e1b99b0da6a4dd2de/services/api/src/api/routes/webhook.py#L84-L89\r\n\r\nInstead, I think we should delete+update the dataset\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom typing import Any, Literal, Optional, TypedDict\n\nfrom jsonschema import ValidationError, validate\nfrom libapi.utils import Endpoint, get_response\nfrom libcommon.dtos import Priority\nfrom libcommon.exceptions import CustomError\nfrom libcommon.operations import delete_dataset, get_current_revision, update_dataset\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.storage_client import StorageClient\nfrom starlette.requests import Request\nfrom starlette.responses import Response\n\nschema = {\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"properties\": {\n \"event\": {\"type\": \"string\", \"enum\": [\"add\", \"remove\", \"update\", \"move\"]},\n \"movedTo\": {\"type\": \"string\"},\n \"repo\": {\n \"type\": \"object\",\n \"properties\": {\n \"headSha\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"type\": {\"type\": \"string\", \"enum\": [\"dataset\", \"model\", \"space\"]},\n },\n \"required\": [\"type\", \"name\"],\n },\n \"scope\": {\n \"type\": \"string\",\n },\n },\n \"required\": [\"event\", \"repo\", \"scope\"],\n}\n\n\nclass _MoonWebhookV2PayloadRepo(TypedDict):\n type: Literal[\"model\", \"dataset\", \"space\"]\n name: str\n\n\nclass MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False):\n headSha: Optional[str]\n\n\nclass MoonWebhookV2Payload(TypedDict):\n \"\"\"\n Payload from a moon-landing webhook call, v2.\n \"\"\"\n\n event: Literal[\"add\", \"remove\", \"update\", \"move\"]\n movedTo: Optional[str]\n repo: 
MoonWebhookV2PayloadRepo\n scope: str\n\n\ndef parse_payload(json: Any) -> MoonWebhookV2Payload:\n validate(instance=json, schema=schema)\n return json # type: ignore\n # ^ validate() ensures the content is correct, but does not give the type\n\n\ndef process_payload(\n payload: MoonWebhookV2Payload,\n blocked_datasets: list[str],\n hf_endpoint: str,\n hf_token: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n) -> None:\n if payload[\"repo\"][\"type\"] != \"dataset\" or payload[\"scope\"] not in (\"repo\", \"repo.content\"):\n # ^ it filters out the webhook calls for non-dataset repos and discussions in dataset repos\n return\n dataset = payload[\"repo\"][\"name\"]\n if dataset is None:\n return\n event = payload[\"event\"]\n if event == \"remove\":\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n elif event in [\"add\", \"update\", \"move\"]:\n if event == \"update\" and get_current_revision(dataset) == payload[\"repo\"][\"headSha\"]:\n # ^ it filters out the webhook calls when the refs/convert/parquet branch is updated\n logging.warning(\n f\"Webhook revision for {dataset} is the same as the current revision in the db - skipping update.\"\n )\n return\n delete_dataset(dataset=dataset, storage_clients=storage_clients)\n # ^ delete the old contents (cache + jobs + assets) to avoid mixed content\n new_dataset = (event == \"move\" and payload[\"movedTo\"]) or dataset\n update_dataset(\n dataset=new_dataset,\n priority=Priority.NORMAL,\n blocked_datasets=blocked_datasets,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n storage_clients=storage_clients,\n )\n\n\ndef create_webhook_endpoint(\n blocked_datasets: list[str],\n hf_endpoint: str,\n hf_token: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n hf_webhook_secret: Optional[str] = None,\n storage_clients: Optional[list[StorageClient]] = None,\n) -> Endpoint:\n async def webhook_endpoint(request: Request) -> Response:\n with StepProfiler(method=\"webhook_endpoint\", step=\"all\"):\n with StepProfiler(method=\"webhook_endpoint\", step=\"get JSON\"):\n try:\n json = await request.json()\n except Exception:\n content = {\"status\": \"error\", \"error\": \"the body could not be parsed as a JSON\"}\n logging.info(\"/webhook: the body could not be parsed as a JSON.\")\n return get_response(content, 400)\n logging.info(f\"/webhook: {json}\")\n with StepProfiler(method=\"webhook_endpoint\", step=\"parse payload and headers\"):\n try:\n payload = parse_payload(json)\n except ValidationError as e:\n content = {\"status\": \"error\", \"error\": \"the JSON payload is invalid\"}\n logging.info(f\"/webhook: the JSON body is invalid. JSON: {json}. Error: {e}\")\n return get_response(content, 400)\n except Exception as e:\n logging.exception(\"Unexpected error\", exc_info=e)\n content = {\"status\": \"error\", \"error\": \"unexpected error\"}\n logging.warning(f\"/webhook: unexpected error while parsing the JSON body is invalid. Error: {e}\")\n return get_response(content, 500)\n\n HEADER = \"x-webhook-secret\"\n trust_sender = (\n hf_webhook_secret is not None\n and (secret := request.headers.get(HEADER)) is not None\n and secret == hf_webhook_secret\n )\n if not trust_sender:\n logging.info(f\"/webhook: the sender is not trusted. JSON: {json}\")\n return get_response(\n {\"status\": \"error\", \"error\": \"The sender is not trusted. 
Retry with a valid secret.\"}, 400\n )\n\n with StepProfiler(method=\"webhook_endpoint\", step=\"process payload\"):\n try:\n process_payload(\n payload=payload,\n blocked_datasets=blocked_datasets,\n hf_endpoint=hf_endpoint,\n hf_token=hf_token,\n hf_timeout_seconds=hf_timeout_seconds,\n storage_clients=storage_clients,\n )\n except CustomError as e:\n content = {\"status\": \"error\", \"error\": \"the dataset is not supported\"}\n dataset = payload[\"repo\"][\"name\"]\n logging.debug(f\"/webhook: the dataset {dataset} is not supported. JSON: {json}. Error: {e}\")\n return get_response(content, 400)\n content = {\"status\": \"ok\"}\n return get_response(content, 200)\n\n return webhook_endpoint\n", "path": "services/api/src/api/routes/webhook.py"}]}
| 2,541 | 334 |
gh_patches_debug_33210
|
rasdani/github-patches
|
git_diff
|
zigpy__zha-device-handlers-1462
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] IKEA E1812 shortcut button does not use quirks
**Describe the bug**
I have two IKEA E1812 Tradfri shortcut buttons that started to behave differently. ZHA claims that they have the same firmware (I also bought them together), but they now have different clusters: The one that is not picked up correctly has an extra `in_cluster` with ID 0xfc7c in its device signature. This causes `zhaquirks.ikea.shortcutbtn.IkeaTradfriShortcutBtn` to not pick this button up, producing a *Fail because input cluster mismatch on at least one endpoint* message in the log.
The `zha_event` still fires as it should, so by using the raw event one can work around this, but maybe there's a way to get the quirks to work.
**To Reproduce**
- pair a shortcut button that has the 0xfc7c input cluster
- go to the device page and see that no quirks are applied
- try to add an automation, see that triggers for button press are missing
(Unfortunately I have no clue what caused the two buttons to present different signatures)
**Expected behavior**
The quirks should still apply
**Additional context**
signature of the button with the extra cluster (this is the only difference between my buttons)
**EDIT:** The WindowCovering output cluster is also missing on the weird button
```
{
"node_descriptor": "NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4476, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0820",
"in_clusters": [
"0x0000",
"0x0001",
"0x0003",
"0x0009",
"0x0020",
"0x1000",
"0xfc7c"
],
"out_clusters": [
"0x0003",
"0x0004",
"0x0006",
"0x0008",
"0x0019",
"0x1000"
]
}
},
"manufacturer": "IKEA of Sweden",
"model": "TRADFRI SHORTCUT Button",
"class": "zigpy.device.Device"
}
```
</issue>
<code>
[start of zhaquirks/ikea/shortcutbtn.py]
1 """Device handler for IKEA of Sweden TRADFRI shortcut button."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomDevice
4 from zigpy.zcl.clusters.closures import WindowCovering
5 from zigpy.zcl.clusters.general import (
6 Alarms,
7 Basic,
8 Groups,
9 Identify,
10 LevelControl,
11 OnOff,
12 Ota,
13 PollControl,
14 PowerConfiguration,
15 )
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from zhaquirks.const import (
19 ARGS,
20 CLUSTER_ID,
21 COMMAND,
22 COMMAND_MOVE_ON_OFF,
23 COMMAND_OFF,
24 COMMAND_ON,
25 COMMAND_STOP,
26 DEVICE_TYPE,
27 DIM_UP,
28 DOUBLE_PRESS,
29 ENDPOINT_ID,
30 ENDPOINTS,
31 INPUT_CLUSTERS,
32 LONG_PRESS,
33 LONG_RELEASE,
34 MODELS_INFO,
35 OUTPUT_CLUSTERS,
36 PROFILE_ID,
37 SHORT_PRESS,
38 TURN_ON,
39 )
40 from zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster
41
42
43 class IkeaTradfriShortcutBtn(CustomDevice):
44 """Custom device representing IKEA of Sweden TRADFRI shortcut button."""
45
46 signature = {
47 # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
48 # device_version=1
49 # input_clusters=[0, 1, 3, 9, 32, 4096]
50 # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>
51 MODELS_INFO: [(IKEA, "TRADFRI SHORTCUT Button")],
52 ENDPOINTS: {
53 1: {
54 PROFILE_ID: zha.PROFILE_ID,
55 DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
56 INPUT_CLUSTERS: [
57 Basic.cluster_id,
58 PowerConfiguration.cluster_id,
59 Identify.cluster_id,
60 Alarms.cluster_id,
61 PollControl.cluster_id,
62 LightLink.cluster_id,
63 ],
64 OUTPUT_CLUSTERS: [
65 Identify.cluster_id,
66 Groups.cluster_id,
67 OnOff.cluster_id,
68 LevelControl.cluster_id,
69 Ota.cluster_id,
70 WindowCovering.cluster_id,
71 LightLink.cluster_id,
72 ],
73 }
74 },
75 }
76
77 replacement = {
78 ENDPOINTS: {
79 1: {
80 PROFILE_ID: zha.PROFILE_ID,
81 DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
82 INPUT_CLUSTERS: [
83 Basic.cluster_id,
84 PowerConfiguration1CRCluster,
85 Identify.cluster_id,
86 Alarms.cluster_id,
87 PollControl.cluster_id,
88 LightLinkCluster,
89 ],
90 OUTPUT_CLUSTERS: [
91 Identify.cluster_id,
92 Groups.cluster_id,
93 OnOff.cluster_id,
94 LevelControl.cluster_id,
95 Ota.cluster_id,
96 WindowCovering.cluster_id,
97 LightLink.cluster_id,
98 ],
99 }
100 }
101 }
102
103 device_automation_triggers = {
104 (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},
105 (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},
106 (LONG_PRESS, DIM_UP): {
107 COMMAND: COMMAND_MOVE_ON_OFF,
108 CLUSTER_ID: 8,
109 ENDPOINT_ID: 1,
110 ARGS: [0, 83],
111 },
112 (LONG_RELEASE, DIM_UP): {
113 COMMAND: COMMAND_STOP,
114 CLUSTER_ID: 8,
115 ENDPOINT_ID: 1,
116 },
117 }
118
[end of zhaquirks/ikea/shortcutbtn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zhaquirks/ikea/shortcutbtn.py b/zhaquirks/ikea/shortcutbtn.py
--- a/zhaquirks/ikea/shortcutbtn.py
+++ b/zhaquirks/ikea/shortcutbtn.py
@@ -115,3 +115,79 @@
ENDPOINT_ID: 1,
},
}
+
+
+class IkeaTradfriShortcutBtn2(CustomDevice):
+ """Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster."""
+
+ signature = {
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
+ # device_version=1
+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]
+ # output_clusters=[3, 4, 6, 8, 25, 4096]>
+ MODELS_INFO: [(IKEA, "TRADFRI SHORTCUT Button")],
+ ENDPOINTS: {
+ 1: {
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
+ INPUT_CLUSTERS: [
+ Basic.cluster_id,
+ PowerConfiguration.cluster_id,
+ Identify.cluster_id,
+ Alarms.cluster_id,
+ PollControl.cluster_id,
+ LightLink.cluster_id,
+ 0xFC7C, # IKEA Cluster
+ ],
+ OUTPUT_CLUSTERS: [
+ Identify.cluster_id,
+ Groups.cluster_id,
+ OnOff.cluster_id,
+ LevelControl.cluster_id,
+ Ota.cluster_id,
+ LightLink.cluster_id,
+ ],
+ }
+ },
+ }
+
+ replacement = {
+ ENDPOINTS: {
+ 1: {
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
+ INPUT_CLUSTERS: [
+ Basic.cluster_id,
+ PowerConfiguration1CRCluster,
+ Identify.cluster_id,
+ Alarms.cluster_id,
+ PollControl.cluster_id,
+ LightLinkCluster,
+ ],
+ OUTPUT_CLUSTERS: [
+ Identify.cluster_id,
+ Groups.cluster_id,
+ OnOff.cluster_id,
+ LevelControl.cluster_id,
+ Ota.cluster_id,
+ LightLink.cluster_id,
+ ],
+ }
+ }
+ }
+
+ device_automation_triggers = {
+ (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},
+ (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},
+ (LONG_PRESS, DIM_UP): {
+ COMMAND: COMMAND_MOVE_ON_OFF,
+ CLUSTER_ID: 8,
+ ENDPOINT_ID: 1,
+ ARGS: [0, 83],
+ },
+ (LONG_RELEASE, DIM_UP): {
+ COMMAND: COMMAND_STOP,
+ CLUSTER_ID: 8,
+ ENDPOINT_ID: 1,
+ },
+ }
|
{"golden_diff": "diff --git a/zhaquirks/ikea/shortcutbtn.py b/zhaquirks/ikea/shortcutbtn.py\n--- a/zhaquirks/ikea/shortcutbtn.py\n+++ b/zhaquirks/ikea/shortcutbtn.py\n@@ -115,3 +115,79 @@\n ENDPOINT_ID: 1,\n },\n }\n+\n+\n+class IkeaTradfriShortcutBtn2(CustomDevice):\n+ \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster.\"\"\"\n+\n+ signature = {\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n+ # device_version=1\n+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n+ # output_clusters=[3, 4, 6, 8, 25, 4096]>\n+ MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ PowerConfiguration.cluster_id,\n+ Identify.cluster_id,\n+ Alarms.cluster_id,\n+ PollControl.cluster_id,\n+ LightLink.cluster_id,\n+ 0xFC7C, # IKEA Cluster\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Ota.cluster_id,\n+ LightLink.cluster_id,\n+ ],\n+ }\n+ },\n+ }\n+\n+ replacement = {\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ PowerConfiguration1CRCluster,\n+ Identify.cluster_id,\n+ Alarms.cluster_id,\n+ PollControl.cluster_id,\n+ LightLinkCluster,\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Ota.cluster_id,\n+ LightLink.cluster_id,\n+ ],\n+ }\n+ }\n+ }\n+\n+ device_automation_triggers = {\n+ (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n+ (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n+ (LONG_PRESS, DIM_UP): {\n+ COMMAND: COMMAND_MOVE_ON_OFF,\n+ CLUSTER_ID: 8,\n+ ENDPOINT_ID: 1,\n+ ARGS: [0, 83],\n+ },\n+ (LONG_RELEASE, DIM_UP): {\n+ COMMAND: COMMAND_STOP,\n+ CLUSTER_ID: 8,\n+ ENDPOINT_ID: 1,\n+ },\n+ }\n", "issue": "[BUG] IKEA E1812 shortcut button does not use quirks\n**Describe the bug**\r\nI have two IKEA E1812 Tradfri shortcut buttons that started to behave differently. ZHA claims that they have the same firmware (I also bought them together), but they now have different clusters: The one that is not picked up correctly has an extra `in_cluster` with ID 0xfc7c in its device signature. 
This causes `zhaquirks.ikea.shortcutbtn.IkeaTradfriShortcutBtn` to not pick this button up, producing a *Fail because input cluster mismatch on at least one endpoint* message in the log.\r\n\r\nThe `zha_event` still fires as it should, so by using the raw event one can work around this, but maybe there's a way to get the quirks to work.\r\n\r\n**To Reproduce**\r\n- pair a shortcut button that has the 0xfc7c input cluster\r\n- go to the device page and see that no quirks are applied\r\n- try to add an automation, see that triggers for button press are missing\r\n\r\n(Unfortunately I have no clue what caused the two buttons to present different signatures)\r\n\r\n**Expected behavior**\r\nThe quirks should still apply\r\n\r\n**Additional context**\r\nsignature of the button with the extra cluster (this is the only difference between my buttons)\r\n**EDIT:** The WindowCovering output cluster is also missing on the weird button\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4476, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0820\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0001\",\r\n \"0x0003\",\r\n \"0x0009\",\r\n \"0x0020\",\r\n \"0x1000\",\r\n \"0xfc7c\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0003\",\r\n \"0x0004\",\r\n \"0x0006\",\r\n \"0x0008\",\r\n \"0x0019\",\r\n \"0x1000\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"IKEA of Sweden\",\r\n \"model\": \"TRADFRI SHORTCUT Button\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI shortcut button.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_MOVE_ON_OFF,\n COMMAND_OFF,\n COMMAND_ON,\n COMMAND_STOP,\n DEVICE_TYPE,\n DIM_UP,\n DOUBLE_PRESS,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n LONG_RELEASE,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_ON,\n)\nfrom zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster\n\n\nclass IkeaTradfriShortcutBtn(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096]\n # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n 
PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration1CRCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLinkCluster,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_MOVE_ON_OFF,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 83],\n },\n (LONG_RELEASE, DIM_UP): {\n COMMAND: COMMAND_STOP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n },\n }\n", "path": "zhaquirks/ikea/shortcutbtn.py"}]}
| 2,261 | 711 |
gh_patches_debug_16823
|
rasdani/github-patches
|
git_diff
|
bids-standard__pybids-505
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.9.4 Release
@tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues?
</issue>
<code>
[start of tools/prep_zenodo.py]
1 #!/usr/bin/env python3
2 import git
3 import json
4 from subprocess import run, PIPE, CalledProcessError
5 from pathlib import Path
6 from tempfile import TemporaryDirectory
7
8
9 def decommify(name):
10 return ' '.join(name.split(', ')[::-1])
11
12
13 # List of repositories whose commits should be counted as contributions
14 codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
15
16 # Last shablona commit
17 origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'
18
19 git_root = Path(git.Repo('.', search_parent_directories=True).working_dir)
20 zenodo_file = git_root / '.zenodo.json'
21
22 zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}
23
24 orig_creators = zenodo.get('creators', [])
25 creator_map = {decommify(creator['name']): creator
26 for creator in orig_creators}
27
28 shortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)
29 counts = [line.split('\t', 1)[::-1]
30 for line in shortlog.stdout.decode().split('\n') if line]
31
32 # Get additional commit counts from dependencies
33 with TemporaryDirectory() as tmpdir:
34 tmppath = Path(tmpdir)
35 for repo in codependents:
36 try:
37 repo, ref = repo
38 except (TypeError, ValueError):
39 ref = None
40 repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])
41 try:
42 clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)
43 except CalledProcessError as err:
44 raise RuntimeError("Could not clone {}".format(repo)) from err
45
46 if ref is None:
47 tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
48 # latest tag
49 ref = tag.stdout.decode().strip().rsplit('\n', 1)[1]
50
51 dep_shortlog = run(
52 ['git', '-C', repo_dir, 'shortlog', '-ns', ref],
53 stdout=PIPE)
54 counts.extend(line.split('\t', 1)[::-1]
55 for line in dep_shortlog.stdout.decode().split('\n')
56 if line)
57
58 commit_counts = {}
59 for committer, commits in counts:
60 commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)
61
62 # Stable sort:
63 # Number of commits in reverse order
64 # Ties broken by alphabetical order of first name
65 committers = [committer
66 for committer, _ in sorted(commit_counts.items(),
67 key=lambda x: (-x[1], x[0]))]
68
69 # Tal to the top
70 first_author = 'Tal Yarkoni'
71 if committers[0] != first_author:
72 committers.remove(first_author)
73 committers.insert(0, first_author)
74
75 creators = [
76 creator_map.get(committer, {'name': committer})
77 for committer in committers
78 ]
79
80 zenodo['creators'] = creators
81 zenodo_file.write_text(json.dumps(zenodo, indent=2) + '\n')
82
[end of tools/prep_zenodo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py
--- a/tools/prep_zenodo.py
+++ b/tools/prep_zenodo.py
@@ -9,6 +9,11 @@
def decommify(name):
return ' '.join(name.split(', ')[::-1])
+# Users who have asked not to be cited at this time
+# XXX We should add a shortlog since the most recent tag and explicitly note
+# that a blacklisted user has contributed again recently, and verify they still
+# do not want to be cited.
+blacklist = {'Cecile Madjar'}
# List of repositories whose commits should be counted as contributions
codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
@@ -75,6 +80,7 @@
creators = [
creator_map.get(committer, {'name': committer})
for committer in committers
+ if committer not in blacklist
]
zenodo['creators'] = creators
|
{"golden_diff": "diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py\n--- a/tools/prep_zenodo.py\n+++ b/tools/prep_zenodo.py\n@@ -9,6 +9,11 @@\n def decommify(name):\n return ' '.join(name.split(', ')[::-1])\n \n+# Users who have asked not to be cited at this time\n+# XXX We should add a shortlog since the most recent tag and explicitly note\n+# that a blacklisted user has contributed again recently, and verify they still\n+# do not want to be cited.\n+blacklist = {'Cecile Madjar'}\n \n # List of repositories whose commits should be counted as contributions\n codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n@@ -75,6 +80,7 @@\n creators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n+ if committer not in blacklist\n ]\n \n zenodo['creators'] = creators\n", "issue": "0.9.4 Release\n@tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues?\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2) + 
'\\n')\n", "path": "tools/prep_zenodo.py"}]}
| 1,475 | 228 |
gh_patches_debug_26541
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-5732
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add tests for domain's RoleManager
## Description
Add unit tests for grid domain's RoleManager at the module `grid/apps/domain/src/main/core/manager`
## Type of Test
- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)
- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)
- [ ] **Regression test** (e.g. checking if by adding or removing a module of code allows other systems to continue to function as intended)
- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)
- [ ] **Performance test** (e.g. checking to see how efficient a system is at performing the intended task)
- [ ] Other...
## Expected Behavior
Ideally, the tests should cover as many methods as possible and within reason.
</issue>
<code>
[start of packages/grid/apps/domain/src/main/core/manager/role_manager.py]
1 # stdlib
2 from typing import List
3 from typing import Union
4
5 # grid relative
6 from ..database.tables.roles import Role
7 from ..exceptions import RoleNotFoundError
8 from .database_manager import DatabaseManager
9
10
11 class RoleManager(DatabaseManager):
12
13 schema = Role
14
15 def __init__(self, database):
16 self._schema = RoleManager.schema
17 self.db = database
18
19 @property
20 def user_role(self):
21 return self.first(name="User")
22
23 @property
24 def owner_role(self):
25 return self.first(name="Owner")
26
27 @property
28 def compliance_officer_role(self):
29 return self.first(name="Compliance Officer")
30
31 @property
32 def admin_role(self):
33 return self.first(name="Administrator")
34
35 @property
36 def common_roles(self):
37 return self.db.session.query(self._schema).filter_by(
38 can_triage_requests=False,
39 can_edit_settings=False,
40 can_create_users=False,
41 can_create_groups=False,
42 can_upload_data=False,
43 can_edit_roles=False,
44 can_manage_infrastructure=False,
45 )
46
47 @property
48 def org_roles(self):
49 return self.db.session.query(self._schema).except_(self.common_roles)
50
51 def first(self, **kwargs) -> Union[None, List]:
52 result = super().first(**kwargs)
53 if not result:
54 raise RoleNotFoundError
55 return result
56
57 def query(self, **kwargs) -> Union[None, List]:
58 results = super().query(**kwargs)
59 if len(results) == 0:
60 raise RoleNotFoundError
61 return results
62
63 def set(self, role_id, params):
64 if self.contain(id=role_id):
65 self.modify({"id": role_id}, params)
66 else:
67 raise RoleNotFoundError
68
[end of packages/grid/apps/domain/src/main/core/manager/role_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py
--- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py
+++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py
@@ -2,6 +2,8 @@
from typing import List
from typing import Union
+from flask_sqlalchemy import BaseQuery
+
# grid relative
from ..database.tables.roles import Role
from ..exceptions import RoleNotFoundError
@@ -32,8 +34,7 @@
def admin_role(self):
return self.first(name="Administrator")
- @property
- def common_roles(self):
+ def _common_roles(self) -> BaseQuery:
return self.db.session.query(self._schema).filter_by(
can_triage_requests=False,
can_edit_settings=False,
@@ -44,9 +45,13 @@
can_manage_infrastructure=False,
)
+ @property
+ def common_roles(self):
+ return self._common_roles().all()
+
@property
def org_roles(self):
- return self.db.session.query(self._schema).except_(self.common_roles)
+ return self.db.session.query(self._schema).except_(self._common_roles).all()
def first(self, **kwargs) -> Union[None, List]:
result = super().first(**kwargs)
|
{"golden_diff": "diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n--- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n+++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n@@ -2,6 +2,8 @@\n from typing import List\n from typing import Union\n \n+from flask_sqlalchemy import BaseQuery\n+\n # grid relative\n from ..database.tables.roles import Role\n from ..exceptions import RoleNotFoundError\n@@ -32,8 +34,7 @@\n def admin_role(self):\n return self.first(name=\"Administrator\")\n \n- @property\n- def common_roles(self):\n+ def _common_roles(self) -> BaseQuery:\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n@@ -44,9 +45,13 @@\n can_manage_infrastructure=False,\n )\n \n+ @property\n+ def common_roles(self):\n+ return self._common_roles().all()\n+\n @property\n def org_roles(self):\n- return self.db.session.query(self._schema).except_(self.common_roles)\n+ return self.db.session.query(self._schema).except_(self._common_roles).all()\n \n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n", "issue": "Add tests for domain's RoleManager\n## Description\r\nAdd unit tests for grid domain's RoleManager at the module `grid/apps/domain/src/main/core/manager`\r\n\r\n## Type of Test\r\n- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)\r\n- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)\r\n- [ ] **Regression test** (e.g. checking if by adding or removing a module of code allows other systems to continue to function as intended)\r\n- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)\r\n- [ ] **Performance test** (e.g. 
checking to see how efficient a system is as performing the intended task)\r\n- [ ] Other...\r\n\r\n## Expected Behavior\r\nIdeally, the tests should cover as many methods as possible and within reason.\r\n\n", "before_files": [{"content": "# stdlib\nfrom typing import List\nfrom typing import Union\n\n# grid relative\nfrom ..database.tables.roles import Role\nfrom ..exceptions import RoleNotFoundError\nfrom .database_manager import DatabaseManager\n\n\nclass RoleManager(DatabaseManager):\n\n schema = Role\n\n def __init__(self, database):\n self._schema = RoleManager.schema\n self.db = database\n\n @property\n def user_role(self):\n return self.first(name=\"User\")\n\n @property\n def owner_role(self):\n return self.first(name=\"Owner\")\n\n @property\n def compliance_officer_role(self):\n return self.first(name=\"Compliance Officer\")\n\n @property\n def admin_role(self):\n return self.first(name=\"Administrator\")\n\n @property\n def common_roles(self):\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n can_create_users=False,\n can_create_groups=False,\n can_upload_data=False,\n can_edit_roles=False,\n can_manage_infrastructure=False,\n )\n\n @property\n def org_roles(self):\n return self.db.session.query(self._schema).except_(self.common_roles)\n\n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n if not result:\n raise RoleNotFoundError\n return result\n\n def query(self, **kwargs) -> Union[None, List]:\n results = super().query(**kwargs)\n if len(results) == 0:\n raise RoleNotFoundError\n return results\n\n def set(self, role_id, params):\n if self.contain(id=role_id):\n self.modify({\"id\": role_id}, params)\n else:\n raise RoleNotFoundError\n", "path": "packages/grid/apps/domain/src/main/core/manager/role_manager.py"}]}
| 1,259 | 308 |
gh_patches_debug_45891
|
rasdani/github-patches
|
git_diff
|
pypa__pip-3213
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip list outdated
I install Flask version 0.8 with pip in editable mode
`pip install -e git+https://github.com/mitsuhiko/[email protected]#egg=flask`
Output of pip list
```
argparse (1.2.1)
Flask (0.8dev-20130506, /home/pratz/VirtualEnv/learnflask/src/flask)
Jinja2 (2.6)
Werkzeug (0.8.3)
wsgiref (0.1.2)
```
Output of pip list -o
Nothing (returns to console without any output)
Should not the output of pip list -o be
`Flask (Current: 0.8dev Latest: 0.9)`
Or is this how pip list -o works for editable mode?
NOTE: pip version is 1.3.1
</issue>
<code>
[start of pip/commands/list.py]
1 from __future__ import absolute_import
2
3 import logging
4 import warnings
5
6 from pip.basecommand import Command
7 from pip.exceptions import DistributionNotFound
8 from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search
9 from pip.req import InstallRequirement
10 from pip.utils import (
11 get_installed_distributions, dist_is_editable, canonicalize_name)
12 from pip.utils.deprecation import RemovedInPip10Warning
13 from pip.wheel import WheelCache
14 from pip.cmdoptions import make_option_group, index_group
15
16
17 logger = logging.getLogger(__name__)
18
19
20 class ListCommand(Command):
21 """
22 List installed packages, including editables.
23
24 Packages are listed in a case-insensitive sorted order.
25 """
26 name = 'list'
27 usage = """
28 %prog [options]"""
29 summary = 'List installed packages.'
30
31 def __init__(self, *args, **kw):
32 super(ListCommand, self).__init__(*args, **kw)
33
34 cmd_opts = self.cmd_opts
35
36 cmd_opts.add_option(
37 '-o', '--outdated',
38 action='store_true',
39 default=False,
40 help='List outdated packages (excluding editables)')
41 cmd_opts.add_option(
42 '-u', '--uptodate',
43 action='store_true',
44 default=False,
45 help='List uptodate packages (excluding editables)')
46 cmd_opts.add_option(
47 '-e', '--editable',
48 action='store_true',
49 default=False,
50 help='List editable projects.')
51 cmd_opts.add_option(
52 '-l', '--local',
53 action='store_true',
54 default=False,
55 help=('If in a virtualenv that has global access, do not list '
56 'globally-installed packages.'),
57 )
58 self.cmd_opts.add_option(
59 '--user',
60 dest='user',
61 action='store_true',
62 default=False,
63 help='Only output packages installed in user-site.')
64
65 cmd_opts.add_option(
66 '--pre',
67 action='store_true',
68 default=False,
69 help=("Include pre-release and development versions. By default, "
70 "pip only finds stable versions."),
71 )
72
73 index_opts = make_option_group(index_group, self.parser)
74
75 self.parser.insert_option_group(0, index_opts)
76 self.parser.insert_option_group(0, cmd_opts)
77
78 def _build_package_finder(self, options, index_urls, session):
79 """
80 Create a package finder appropriate to this list command.
81 """
82 return PackageFinder(
83 find_links=options.find_links,
84 index_urls=index_urls,
85 allow_all_prereleases=options.pre,
86 trusted_hosts=options.trusted_hosts,
87 process_dependency_links=options.process_dependency_links,
88 session=session,
89 )
90
91 def run(self, options, args):
92 if options.allow_external:
93 warnings.warn(
94 "--allow-external has been deprecated and will be removed in "
95 "the future. Due to changes in the repository protocol, it no "
96 "longer has any effect.",
97 RemovedInPip10Warning,
98 )
99
100 if options.allow_all_external:
101 warnings.warn(
102 "--allow-all-external has been deprecated and will be removed "
103 "in the future. Due to changes in the repository protocol, it "
104 "no longer has any effect.",
105 RemovedInPip10Warning,
106 )
107
108 if options.allow_unverified:
109 warnings.warn(
110 "--allow-unverified has been deprecated and will be removed "
111 "in the future. Due to changes in the repository protocol, it "
112 "no longer has any effect.",
113 RemovedInPip10Warning,
114 )
115
116 if options.outdated:
117 self.run_outdated(options)
118 elif options.uptodate:
119 self.run_uptodate(options)
120 elif options.editable:
121 self.run_editables(options)
122 else:
123 self.run_listing(options)
124
125 def run_outdated(self, options):
126 for dist, version, typ in self.find_packages_latest_versions(options):
127 if version > dist.parsed_version:
128 logger.info(
129 '%s (Current: %s Latest: %s [%s])',
130 dist.project_name, dist.version, version, typ,
131 )
132
133 def find_packages_latest_versions(self, options):
134 index_urls = [options.index_url] + options.extra_index_urls
135 if options.no_index:
136 logger.info('Ignoring indexes: %s', ','.join(index_urls))
137 index_urls = []
138
139 dependency_links = []
140 for dist in get_installed_distributions(local_only=options.local,
141 user_only=options.user):
142 if dist.has_metadata('dependency_links.txt'):
143 dependency_links.extend(
144 dist.get_metadata_lines('dependency_links.txt'),
145 )
146
147 with self._build_session(options) as session:
148 finder = self._build_package_finder(options, index_urls, session)
149 finder.add_dependency_links(dependency_links)
150
151 installed_packages = get_installed_distributions(
152 local_only=options.local,
153 user_only=options.user,
154 include_editables=False,
155 )
156 format_control = FormatControl(set(), set())
157 wheel_cache = WheelCache(options.cache_dir, format_control)
158 for dist in installed_packages:
159 req = InstallRequirement.from_line(
160 dist.key, None, isolated=options.isolated_mode,
161 wheel_cache=wheel_cache
162 )
163 typ = 'unknown'
164 try:
165 link = finder.find_requirement(req, True)
166
167 # If link is None, means installed version is most
168 # up-to-date
169 if link is None:
170 continue
171 except DistributionNotFound:
172 continue
173 else:
174 canonical_name = canonicalize_name(req.name)
175 formats = fmt_ctl_formats(format_control, canonical_name)
176 search = Search(
177 req.name,
178 canonical_name,
179 formats)
180 remote_version = finder._link_package_versions(
181 link, search).version
182 if link.is_wheel:
183 typ = 'wheel'
184 else:
185 typ = 'sdist'
186 yield dist, remote_version, typ
187
188 def run_listing(self, options):
189 installed_packages = get_installed_distributions(
190 local_only=options.local,
191 user_only=options.user,
192 )
193 self.output_package_listing(installed_packages)
194
195 def run_editables(self, options):
196 installed_packages = get_installed_distributions(
197 local_only=options.local,
198 user_only=options.user,
199 editables_only=True,
200 )
201 self.output_package_listing(installed_packages)
202
203 def output_package_listing(self, installed_packages):
204 installed_packages = sorted(
205 installed_packages,
206 key=lambda dist: dist.project_name.lower(),
207 )
208 for dist in installed_packages:
209 if dist_is_editable(dist):
210 line = '%s (%s, %s)' % (
211 dist.project_name,
212 dist.version,
213 dist.location,
214 )
215 else:
216 line = '%s (%s)' % (dist.project_name, dist.version)
217 logger.info(line)
218
219 def run_uptodate(self, options):
220 uptodate = []
221 for dist, version, typ in self.find_packages_latest_versions(options):
222 if dist.parsed_version == version:
223 uptodate.append(dist)
224 self.output_package_listing(uptodate)
225
[end of pip/commands/list.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/pip/commands/list.py b/pip/commands/list.py
--- a/pip/commands/list.py
+++ b/pip/commands/list.py
@@ -4,7 +4,7 @@
import warnings
from pip.basecommand import Command
-from pip.exceptions import DistributionNotFound
+from pip.exceptions import CommandError, DistributionNotFound
from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search
from pip.req import InstallRequirement
from pip.utils import (
@@ -37,12 +37,12 @@
'-o', '--outdated',
action='store_true',
default=False,
- help='List outdated packages (excluding editables)')
+ help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
- help='List uptodate packages (excluding editables)')
+ help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
@@ -112,22 +112,25 @@
"no longer has any effect.",
RemovedInPip10Warning,
)
+ if options.outdated and options.uptodate:
+ raise CommandError(
+ "Options --outdated and --uptodate cannot be combined.")
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
- elif options.editable:
- self.run_editables(options)
else:
self.run_listing(options)
def run_outdated(self, options):
- for dist, version, typ in self.find_packages_latest_versions(options):
- if version > dist.parsed_version:
+ for dist, latest_version, typ in sorted(
+ self.find_packages_latest_versions(options),
+ key=lambda p: p[0].project_name.lower()):
+ if latest_version > dist.parsed_version:
logger.info(
- '%s (Current: %s Latest: %s [%s])',
- dist.project_name, dist.version, version, typ,
+ '%s - Latest: %s [%s]',
+ self.output_package(dist), latest_version, typ,
)
def find_packages_latest_versions(self, options):
@@ -137,8 +140,10 @@
index_urls = []
dependency_links = []
- for dist in get_installed_distributions(local_only=options.local,
- user_only=options.user):
+ for dist in get_installed_distributions(
+ local_only=options.local,
+ user_only=options.user,
+ editables_only=options.editable):
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
@@ -151,7 +156,7 @@
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
- include_editables=False,
+ editables_only=options.editable,
)
format_control = FormatControl(set(), set())
wheel_cache = WheelCache(options.cache_dir, format_control)
@@ -189,16 +194,19 @@
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
+ editables_only=options.editable,
)
self.output_package_listing(installed_packages)
- def run_editables(self, options):
- installed_packages = get_installed_distributions(
- local_only=options.local,
- user_only=options.user,
- editables_only=True,
- )
- self.output_package_listing(installed_packages)
+ def output_package(self, dist):
+ if dist_is_editable(dist):
+ return '%s (%s, %s)' % (
+ dist.project_name,
+ dist.version,
+ dist.location,
+ )
+ else:
+ return '%s (%s)' % (dist.project_name, dist.version)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
@@ -206,15 +214,7 @@
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
- if dist_is_editable(dist):
- line = '%s (%s, %s)' % (
- dist.project_name,
- dist.version,
- dist.location,
- )
- else:
- line = '%s (%s)' % (dist.project_name, dist.version)
- logger.info(line)
+ logger.info(self.output_package(dist))
def run_uptodate(self, options):
uptodate = []
verification_info:
{"golden_diff": "diff --git a/pip/commands/list.py b/pip/commands/list.py\n--- a/pip/commands/list.py\n+++ b/pip/commands/list.py\n@@ -4,7 +4,7 @@\n import warnings\n \n from pip.basecommand import Command\n-from pip.exceptions import DistributionNotFound\n+from pip.exceptions import CommandError, DistributionNotFound\n from pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search\n from pip.req import InstallRequirement\n from pip.utils import (\n@@ -37,12 +37,12 @@\n '-o', '--outdated',\n action='store_true',\n default=False,\n- help='List outdated packages (excluding editables)')\n+ help='List outdated packages')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n- help='List uptodate packages (excluding editables)')\n+ help='List uptodate packages')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n@@ -112,22 +112,25 @@\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n+ if options.outdated and options.uptodate:\n+ raise CommandError(\n+ \"Options --outdated and --uptodate cannot be combined.\")\n \n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n- elif options.editable:\n- self.run_editables(options)\n else:\n self.run_listing(options)\n \n def run_outdated(self, options):\n- for dist, version, typ in self.find_packages_latest_versions(options):\n- if version > dist.parsed_version:\n+ for dist, latest_version, typ in sorted(\n+ self.find_packages_latest_versions(options),\n+ key=lambda p: p[0].project_name.lower()):\n+ if latest_version > dist.parsed_version:\n logger.info(\n- '%s (Current: %s Latest: %s [%s])',\n- dist.project_name, dist.version, version, typ,\n+ '%s - Latest: %s [%s]',\n+ self.output_package(dist), latest_version, typ,\n )\n \n def find_packages_latest_versions(self, options):\n@@ -137,8 +140,10 @@\n index_urls = []\n \n dependency_links = []\n- for dist in get_installed_distributions(local_only=options.local,\n- user_only=options.user):\n+ for dist in get_installed_distributions(\n+ local_only=options.local,\n+ user_only=options.user,\n+ editables_only=options.editable):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n@@ -151,7 +156,7 @@\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n- include_editables=False,\n+ editables_only=options.editable,\n )\n format_control = FormatControl(set(), set())\n wheel_cache = WheelCache(options.cache_dir, format_control)\n@@ -189,16 +194,19 @@\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n+ editables_only=options.editable,\n )\n self.output_package_listing(installed_packages)\n \n- def run_editables(self, options):\n- installed_packages = get_installed_distributions(\n- local_only=options.local,\n- user_only=options.user,\n- editables_only=True,\n- )\n- self.output_package_listing(installed_packages)\n+ def output_package(self, dist):\n+ if dist_is_editable(dist):\n+ return '%s (%s, %s)' % (\n+ dist.project_name,\n+ dist.version,\n+ dist.location,\n+ )\n+ else:\n+ return '%s (%s)' % (dist.project_name, dist.version)\n \n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n@@ -206,15 +214,7 @@\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n- if dist_is_editable(dist):\n- line = '%s (%s, %s)' % (\n- dist.project_name,\n- 
dist.version,\n- dist.location,\n- )\n- else:\n- line = '%s (%s)' % (dist.project_name, dist.version)\n- logger.info(line)\n+ logger.info(self.output_package(dist))\n \n def run_uptodate(self, options):\n uptodate = []\n", "issue": "pip list outdated\nI install Flask version 0.8 with pip in editable mode \n\n`pip install -e git+https://github.com/mitsuhiko/[email protected]#egg=flask`\n\nOutput of pip list \n\n```\nargparse (1.2.1)\nFlask (0.8dev-20130506, /home/pratz/VirtualEnv/learnflask/src/flask)\nJinja2 (2.6)\nWerkzeug (0.8.3)\nwsgiref (0.1.2)\n```\n\nOuput of pip list -o\nNothing ( returns to console without any output )\n\nShould not the output of pip list -o be\n`Flask (Current: 0.8dev Latest: 0.9)`\n\nOr is this how pip list -o works for editable mode ?\n\nNOTE: pip version is 1.3.1\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom pip.basecommand import Command\nfrom pip.exceptions import DistributionNotFound\nfrom pip.index import FormatControl, fmt_ctl_formats, PackageFinder, Search\nfrom pip.req import InstallRequirement\nfrom pip.utils import (\n get_installed_distributions, dist_is_editable, canonicalize_name)\nfrom pip.utils.deprecation import RemovedInPip10Warning\nfrom pip.wheel import WheelCache\nfrom pip.cmdoptions import make_option_group, index_group\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ListCommand(Command):\n \"\"\"\n List installed packages, including editables.\n\n Packages are listed in a case-insensitive sorted order.\n \"\"\"\n name = 'list'\n usage = \"\"\"\n %prog [options]\"\"\"\n summary = 'List installed packages.'\n\n def __init__(self, *args, **kw):\n super(ListCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(\n '-o', '--outdated',\n action='store_true',\n default=False,\n help='List outdated packages (excluding editables)')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n help='List uptodate packages (excluding editables)')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n default=False,\n help='List editable projects.')\n cmd_opts.add_option(\n '-l', '--local',\n action='store_true',\n default=False,\n help=('If in a virtualenv that has global access, do not list '\n 'globally-installed packages.'),\n )\n self.cmd_opts.add_option(\n '--user',\n dest='user',\n action='store_true',\n default=False,\n help='Only output packages installed in user-site.')\n\n cmd_opts.add_option(\n '--pre',\n action='store_true',\n default=False,\n help=(\"Include pre-release and development versions. By default, \"\n \"pip only finds stable versions.\"),\n )\n\n index_opts = make_option_group(index_group, self.parser)\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def _build_package_finder(self, options, index_urls, session):\n \"\"\"\n Create a package finder appropriate to this list command.\n \"\"\"\n return PackageFinder(\n find_links=options.find_links,\n index_urls=index_urls,\n allow_all_prereleases=options.pre,\n trusted_hosts=options.trusted_hosts,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n\n def run(self, options, args):\n if options.allow_external:\n warnings.warn(\n \"--allow-external has been deprecated and will be removed in \"\n \"the future. 
Due to changes in the repository protocol, it no \"\n \"longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_all_external:\n warnings.warn(\n \"--allow-all-external has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_unverified:\n warnings.warn(\n \"--allow-unverified has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n elif options.editable:\n self.run_editables(options)\n else:\n self.run_listing(options)\n\n def run_outdated(self, options):\n for dist, version, typ in self.find_packages_latest_versions(options):\n if version > dist.parsed_version:\n logger.info(\n '%s (Current: %s Latest: %s [%s])',\n dist.project_name, dist.version, version, typ,\n )\n\n def find_packages_latest_versions(self, options):\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n dependency_links = []\n for dist in get_installed_distributions(local_only=options.local,\n user_only=options.user):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n )\n\n with self._build_session(options) as session:\n finder = self._build_package_finder(options, index_urls, session)\n finder.add_dependency_links(dependency_links)\n\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n include_editables=False,\n )\n format_control = FormatControl(set(), set())\n wheel_cache = WheelCache(options.cache_dir, format_control)\n for dist in installed_packages:\n req = InstallRequirement.from_line(\n dist.key, None, isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n typ = 'unknown'\n try:\n link = finder.find_requirement(req, True)\n\n # If link is None, means installed version is most\n # up-to-date\n if link is None:\n continue\n except DistributionNotFound:\n continue\n else:\n canonical_name = canonicalize_name(req.name)\n formats = fmt_ctl_formats(format_control, canonical_name)\n search = Search(\n req.name,\n canonical_name,\n formats)\n remote_version = finder._link_package_versions(\n link, search).version\n if link.is_wheel:\n typ = 'wheel'\n else:\n typ = 'sdist'\n yield dist, remote_version, typ\n\n def run_listing(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n )\n self.output_package_listing(installed_packages)\n\n def run_editables(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=True,\n )\n self.output_package_listing(installed_packages)\n\n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n installed_packages,\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n if dist_is_editable(dist):\n line = '%s (%s, %s)' % (\n dist.project_name,\n dist.version,\n dist.location,\n )\n else:\n line = '%s (%s)' % (dist.project_name, dist.version)\n logger.info(line)\n\n def run_uptodate(self, options):\n uptodate = []\n for dist, version, typ in 
self.find_packages_latest_versions(options):\n if dist.parsed_version == version:\n uptodate.append(dist)\n self.output_package_listing(uptodate)\n", "path": "pip/commands/list.py"}]}
num_tokens_prompt: 2,794 | num_tokens_diff: 1,003

problem_id: gh_patches_debug_59199
source: rasdani/github-patches
task_type: git_diff
in_source_id: Nitrate__Nitrate-380
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add more information links to package
Add these links to `project_urls` argument of `setup.py`.
- Source Code: https://github.com/Nitrate/Nitrate
- Issue Tracker: https://github.com/Nitrate/Nitrate/issues
- Documentation: https://nitrate.readthedocs.io/
</issue>
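For orientation before the full recipe below: a minimal, hypothetical sketch of what the requested change looks like in a setuptools `setup()` call. Only the three URLs are taken from the issue itself; the name, version, and packages arguments are placeholders so the snippet stays self-contained.

```python
# Hypothetical sketch, not Nitrate's actual setup.py: shows where project_urls fits.
from setuptools import setup, find_packages

setup(
    name="Nitrate",            # placeholder metadata for illustration
    version="0.0.0",           # the real recipe reads VERSION.txt instead
    packages=find_packages(),
    project_urls={
        "Source Code": "https://github.com/Nitrate/Nitrate",
        "Issue Tracker": "https://github.com/Nitrate/Nitrate/issues",
        "Documentation": "https://nitrate.readthedocs.io/",
    },
)
```

Package indexes such as PyPI pick these entries up and render them as links on the project page, which is the practical payoff of the change.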
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 with open('VERSION.txt', 'r') as f:
9 pkg_version = f.read().strip()
10
11
12 def get_long_description():
13 with open('README.rst', 'r') as f:
14 return f.read()
15
16
17 install_requires = [
18 'beautifulsoup4 >= 4.1.1',
19 'django >= 1.11,<3.0',
20 'django-contrib-comments == 1.8.0',
21 'django-tinymce == 2.7.0',
22 'django-uuslug == 1.1.8',
23 'html2text',
24 'odfpy >= 0.9.6',
25 'python-bugzilla',
26 'six',
27 'xmltodict',
28 'kobo == 0.9.0'
29 ]
30
31 if sys.version_info.major < 3:
32 install_requires += [
33 'enum34',
34 ]
35
36 extras_require = {
37 'mysql': ['PyMySQL == 0.9.2'],
38 'pgsql': ['psycopg2 == 2.7.5'],
39
40 # Required for tcms.core.contrib.auth.backends.KerberosBackend
41 'krbauth': [
42 'kerberos == 1.2.5'
43 ],
44
45 # Packages for building documentation
46 'docs': [
47 'Sphinx >= 1.1.2',
48 'sphinx_rtd_theme',
49 ],
50
51 # Necessary packages for running tests
52 'tests': [
53 'beautifulsoup4',
54 'coverage',
55 'factory_boy',
56 'flake8',
57 'mock',
58 'pytest',
59 'pytest-cov',
60 'pytest-django',
61 ],
62
63 # Contain tools that assists the development
64 'devtools': [
65 'django-debug-toolbar == 1.7',
66 'tox',
67 'django-extensions',
68 'pygraphviz',
69 'future-breakpoint',
70 ],
71
72 # Required packages required to run async tasks
73 'async': [
74 'celery == 4.2.0',
75 ]
76 }
77
78 setup(
79 name='Nitrate',
80 version=pkg_version,
81 description='Test Case Management System',
82 long_description=get_long_description(),
83 author='Nitrate Team',
84 maintainer='Chenxiong Qi',
85 maintainer_email='[email protected]',
86 url='https://github.com/Nitrate/Nitrate/',
87 license='GPLv2+',
88 keywords='test case',
89 install_requires=install_requires,
90 extras_require=extras_require,
91 packages=find_packages(),
92 include_package_data=True,
93 classifiers=[
94 'Framework :: Django',
95 'Framework :: Django :: 1.11',
96 'Framework :: Django :: 2.0',
97 'Framework :: Django :: 2.1',
98 'Intended Audience :: Developers',
99 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
100 'Programming Language :: Python :: 2',
101 'Programming Language :: Python :: 2.7',
102 'Programming Language :: Python :: 3',
103 'Programming Language :: Python :: 3.6',
104 'Programming Language :: Python :: 3.7',
105 'Topic :: Software Development :: Quality Assurance',
106 'Topic :: Software Development :: Testing',
107 ],
108 )
109
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,4 +105,9 @@
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
+ project_urls={
+ 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
+ 'Source Code': 'https://github.com/Nitrate/Nitrate',
+ 'Documentation': 'https://nitrate.readthedocs.io/',
+ },
)
verification_info:
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,4 +105,9 @@\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n+ project_urls={\n+ 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n+ 'Source Code': 'https://github.com/Nitrate/Nitrate',\n+ 'Documentation': 'https://nitrate.readthedocs.io/',\n+ },\n )\n", "issue": "Add more information links to package\nAdd these links to `project_urls` argument of `setup.py`.\r\n\r\n- Source Code: https://github.com/Nitrate/Nitrate\r\n- Issue Tracker: https://github.com/Nitrate/Nitrate/issues\r\n- Documentation: https://nitrate.readthedocs.io/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 1,540 | num_tokens_diff: 116

problem_id: gh_patches_debug_17383
source: rasdani/github-patches
task_type: git_diff
in_source_id: conan-io__conan-center-index-5508
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] sqlcipher/4.4.3
### Package Details
 * Package Name/Version: **sqlcipher/4.4.3**
* Changelog: **https://github.com/sqlcipher/sqlcipher/blob/master/CHANGELOG.md**
----
* works on Mac Catalyst
The above-mentioned version has just been released by the upstream project and is not yet available as a recipe. Please add this version.
</issue>
<code>
[start of recipes/sqlcipher/all/conanfile.py]
1 from conans import tools, ConanFile, AutoToolsBuildEnvironment, RunEnvironment
2 import platform
3 import sys
4 import os
5
6
7 class SqlcipherConan(ConanFile):
8 name = "sqlcipher"
9 license = "BSD-3-Clause"
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "https://www.zetetic.net/sqlcipher/"
12 description = "SQLite extension that provides 256 bit AES encryption of database files."
13 settings = "os", "compiler", "build_type", "arch"
14 options = {
15 "shared": [True, False],
16 "fPIC": [True, False],
17 "crypto_library": ["openssl", "libressl"],
18 "with_largefile": [True, False],
19 "temporary_store": ["always_file", "default_file", "default_memory", "always_memory"]
20 }
21 default_options = {
22 "shared": False,
23 "fPIC": True,
24 "crypto_library": "openssl",
25 "with_largefile": True,
26 "temporary_store": "default_memory"
27 }
28 topics = ("database", "encryption", "SQLite")
29 exports_sources = "patches/*"
30 generators = "cmake"
31 _source_subfolder = "source_subfolder"
32
33
34 def config_options(self):
35 del self.settings.compiler.libcxx
36 del self.settings.compiler.cppstd
37 if self.settings.os != "Linux":
38 del self.options.with_largefile
39 if self.settings.os == "Windows":
40 del self.options.fPIC
41
42 def build_requirements(self):
43 # It is possible to have a MinGW cross-build toolchain (Linux to Windows)
44 # Only require msys2 when building on an actual Windows system
45 if self.settings.os == "Windows" and self.settings.compiler == "gcc" and tools.os_info.is_windows:
46 self.build_requires("msys2/20190524")
47 self.build_requires("tcl/8.6.10")
48
49 def requirements(self):
50 if self.options.crypto_library == "openssl":
51 self.requires("openssl/1.1.1h")
52 else:
53 self.requires("libressl/3.2.0")
54
55 def source(self):
56 tools.get(**self.conan_data["sources"][self.version])
57 extracted_dir = self.name + "-" + self.version
58 os.rename(extracted_dir, self._source_subfolder)
59
60 @property
61 def _temp_store_nmake_value(self):
62 return {"always_file": "0",
63 "default_file": "1",
64 "default_memory": "2",
65 "always_memory": "3"}.get(str(self.options.temporary_store))
66
67 @property
68 def _temp_store_autotools_value(self):
69 return {"always_file": "never",
70 "default_file": "no",
71 "default_memory": "yes",
72 "always_memory": "always"}.get(str(self.options.temporary_store))
73
74 def _build_visual(self):
75 crypto_dep = self.deps_cpp_info[str(self.options.crypto_library)]
76 crypto_incdir = crypto_dep.include_paths[0]
77 crypto_libdir = crypto_dep.lib_paths[0]
78 libs = map(lambda lib : lib + ".lib", crypto_dep.libs)
79 system_libs = map(lambda lib : lib + ".lib", crypto_dep.system_libs)
80
81 nmake_flags = [
82 "TLIBS=\"%s %s\"" % (" ".join(libs), " ".join(system_libs)),
83 "LTLIBPATHS=/LIBPATH:%s" % crypto_libdir,
84 "OPTS=\"-I%s -DSQLITE_HAS_CODEC\"" % (crypto_incdir),
85 "NO_TCL=1",
86 "USE_AMALGAMATION=1",
87 "OPT_FEATURE_FLAGS=-DSQLCIPHER_CRYPTO_OPENSSL",
88 "SQLITE_TEMP_STORE=%s" % self._temp_store_nmake_value,
89 "TCLSH_CMD=%s" % self.deps_env_info.TCLSH,
90 ]
91
92 main_target = "dll" if self.options.shared else "sqlcipher.lib"
93
94 if self.settings.compiler.runtime in ["MD", "MDd"]:
95 nmake_flags.append("USE_CRT_DLL=1")
96 if self.settings.build_type == "Debug":
97 nmake_flags.append("DEBUG=2")
98 nmake_flags.append("FOR_WIN10=1")
99 platforms = {"x86": "x86", "x86_64": "x64"}
100 nmake_flags.append("PLATFORM=%s" % platforms[self.settings.arch.value])
101 vcvars = tools.vcvars_command(self.settings)
102 self.run("%s && nmake /f Makefile.msc %s %s" % (vcvars, main_target, " ".join(nmake_flags)), cwd=self._source_subfolder)
103
104 def _build_autotools(self):
105 self.run('chmod +x configure', cwd=self._source_subfolder)
106 absolute_install_dir = os.path.abspath(os.path.join(".", "install"))
107 absolute_install_dir = absolute_install_dir.replace("\\", "/")
108 autotools_env = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
109 if self.settings.os == "Linux":
110 autotools_env.libs.append("dl")
111 if not self.options.with_largefile:
112 autotools_env.defines.append("SQLITE_DISABLE_LFS=1")
113 autotools_env.defines.extend(["SQLITE_HAS_CODEC", "SQLCIPHER_CRYPTO_OPENSSL"])
114
115 # sqlcipher config.sub does not contain android configurations...
116 # elf is the most basic `os' for Android
117 host = None
118 if self.settings.os == "Android":
119 host = "%s-linux-elf" % self._arch_id_str_compiler
120 elif self.settings.os == "Windows":
121 arch = str(self.settings.arch)
122 if arch == "x86":
123 arch = "i386"
124 host = "%s-pc-mingw32" % arch
125 elif self.settings.os == "iOS":
126 host = "%s-apple-darwin" % self.settings.arch
127
128 configure_args = self._get_configure_args(absolute_install_dir)
129 with tools.chdir(self._source_subfolder):
130 # Hack, uname -p returns i386, configure guesses x86_64, we must force i386 so that cross-compilation is correctly detected.
131 # Otherwise host/build are the same, and configure tries to launch a sample executable, and fails miserably.
132 env_vars = autotools_env.vars
133 if self.settings.os == "iOS":
134 build = "i386-apple-darwin"
135 # same for mingw...
136 elif self.settings.os == "Windows":
137 build = "x86_64-linux"
138 env_vars["config_TARGET_EXEEXT"] = ".exe"
139 else:
140 build = None
141 tclsh_cmd = self.deps_env_info.TCLSH
142 env_vars["TCLSH_CMD"] = tclsh_cmd.replace("\\", "/")
143 autotools_env.configure(args=configure_args, host=host, build=build, vars=env_vars)
144 if self.settings.os == "Windows":
145 # sqlcipher will create .exe for the build machine, which we defined to Linux...
146 tools.replace_in_file(os.path.join(self.build_folder, self._source_subfolder, "Makefile"), "BEXE = .exe", "BEXE = ")
147 autotools_env.make(args=["install"])
148
149 @property
150 def _arch_id_str_compiler(self):
151 return {"x86": "i686",
152 "armv6": "arm",
153 "armv7": "arm",
154 "armv7hf": "arm",
155 # Hack: config.guess of sqlcipher does not like aarch64
156 "armv8": "armv8",
157 "mips64": "mips64"}.get(str(self.settings.arch),
158 str(self.settings.arch))
159
160 def _get_configure_args(self, absolute_install_dir):
161 args = [
162 "--prefix=%s" % absolute_install_dir,
163
164 self._autotools_bool_arg("shared", self.options.shared),
165 self._autotools_bool_arg("static", not self.options.shared),
166 "--enable-tempstore=%s" % self._temp_store_autotools_value,
167 "--disable-tcl",
168 ]
169 if self.settings.os == "Windows":
170 args.extend(["config_BUILD_EXEEXT='.exe'", "config_TARGET_EXEEXT='.exe'"])
171 return args
172
173 def _autotools_bool_arg(self, arg_base_name, value):
174 prefix = "--enable-" if value else "--disable-"
175
176 return prefix + arg_base_name
177
178 def build(self):
179 for patch in self.conan_data["patches"][self.version]:
180 tools.patch(**patch)
181 if self.settings.os == "Macos":
182 tools.replace_in_file(os.path.join(self._source_subfolder, "configure"), r"-install_name \$rpath/", "-install_name ")
183
184 if self.settings.compiler == "Visual Studio":
185 self._build_visual()
186 else:
187 self._build_autotools()
188
189 def _package_unix(self):
190 self.copy("*sqlite3.h", src="install")
191 self.copy("*.so*", dst="lib", src="install", keep_path=False, symlinks=True)
192 self.copy("*.a", dst="lib", src="install", keep_path=False)
193 self.copy("*.lib", dst="lib", src="install", keep_path=False)
194 self.copy("*.dll", dst="bin", src="install", keep_path=False)
195 self.copy("*.dylib", dst="lib", src="install", keep_path=False)
196 self.copy("*LICENSE", dst="licenses", keep_path=False)
197
198 def _package_visual(self):
199 self.copy("*.dll", dst="bin", keep_path=False)
200 self.copy("*.lib", dst="lib", keep_path=False)
201 self.copy("*LICENSE", dst="licenses", keep_path=False)
202 self.copy("sqlite3.h", src=self._source_subfolder, dst=os.path.join("include", "sqlcipher"))
203
204 def package(self):
205 if self.settings.compiler == "Visual Studio":
206 self._package_visual()
207 else:
208 self._package_unix()
209
210 def package_info(self):
211 self.cpp_info.libs = ["sqlcipher"]
212 if self.settings.os == "Linux":
213 self.cpp_info.system_libs.extend(["pthread", "dl"])
214 self.cpp_info.defines = ["SQLITE_HAS_CODEC", 'SQLCIPHER_CRYPTO_OPENSSL', 'SQLITE_TEMP_STORE=%s' % self._temp_store_nmake_value]
215 # Allow using #include <sqlite3.h> even with sqlcipher (for libs like sqlpp11-connector-sqlite3)
216 self.cpp_info.includedirs.append(os.path.join("include", "sqlcipher"))
217
[end of recipes/sqlcipher/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/recipes/sqlcipher/all/conanfile.py b/recipes/sqlcipher/all/conanfile.py
--- a/recipes/sqlcipher/all/conanfile.py
+++ b/recipes/sqlcipher/all/conanfile.py
@@ -39,6 +39,10 @@
if self.settings.os == "Windows":
del self.options.fPIC
+ def configure(self):
+ if self.options.shared:
+ del self.options.fPIC
+
def build_requirements(self):
# It is possible to have a MinGW cross-build toolchain (Linux to Windows)
# Only require msys2 when building on an actual Windows system
@@ -48,7 +52,7 @@
def requirements(self):
if self.options.crypto_library == "openssl":
- self.requires("openssl/1.1.1h")
+ self.requires("openssl/1.1.1k")
else:
self.requires("libressl/3.2.0")
verification_info:
{"golden_diff": "diff --git a/recipes/sqlcipher/all/conanfile.py b/recipes/sqlcipher/all/conanfile.py\n--- a/recipes/sqlcipher/all/conanfile.py\n+++ b/recipes/sqlcipher/all/conanfile.py\n@@ -39,6 +39,10 @@\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n \n+ def configure(self):\n+ if self.options.shared:\n+ del self.options.fPIC\n+\n def build_requirements(self):\n # It is possible to have a MinGW cross-build toolchain (Linux to Windows)\n # Only require msys2 when building on an actual Windows system\n@@ -48,7 +52,7 @@\n \n def requirements(self):\n if self.options.crypto_library == \"openssl\":\n- self.requires(\"openssl/1.1.1h\")\n+ self.requires(\"openssl/1.1.1k\")\n else:\n self.requires(\"libressl/3.2.0\")\n", "issue": "[request] sqlcipher/4.4.3\n### Package Details\r\n * Package Name/Version: ** sqlcipher/4.4.3**\r\n * Changelog: **https://github.com/sqlcipher/sqlcipher/blob/master/CHANGELOG.md**\r\n----\r\n * works on Mac Catalyst \r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version.\r\n\n", "before_files": [{"content": "from conans import tools, ConanFile, AutoToolsBuildEnvironment, RunEnvironment\nimport platform\nimport sys\nimport os\n\n\nclass SqlcipherConan(ConanFile):\n name = \"sqlcipher\"\n license = \"BSD-3-Clause\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.zetetic.net/sqlcipher/\"\n description = \"SQLite extension that provides 256 bit AES encryption of database files.\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"crypto_library\": [\"openssl\", \"libressl\"],\n \"with_largefile\": [True, False],\n \"temporary_store\": [\"always_file\", \"default_file\", \"default_memory\", \"always_memory\"]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"crypto_library\": \"openssl\",\n \"with_largefile\": True,\n \"temporary_store\": \"default_memory\"\n }\n topics = (\"database\", \"encryption\", \"SQLite\")\n exports_sources = \"patches/*\"\n generators = \"cmake\"\n _source_subfolder = \"source_subfolder\"\n\n\n def config_options(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n if self.settings.os != \"Linux\":\n del self.options.with_largefile\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def build_requirements(self):\n # It is possible to have a MinGW cross-build toolchain (Linux to Windows)\n # Only require msys2 when building on an actual Windows system\n if self.settings.os == \"Windows\" and self.settings.compiler == \"gcc\" and tools.os_info.is_windows:\n self.build_requires(\"msys2/20190524\")\n self.build_requires(\"tcl/8.6.10\")\n\n def requirements(self):\n if self.options.crypto_library == \"openssl\":\n self.requires(\"openssl/1.1.1h\")\n else:\n self.requires(\"libressl/3.2.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n @property\n def _temp_store_nmake_value(self):\n return {\"always_file\": \"0\",\n \"default_file\": \"1\",\n \"default_memory\": \"2\",\n \"always_memory\": \"3\"}.get(str(self.options.temporary_store))\n\n @property\n def _temp_store_autotools_value(self):\n return {\"always_file\": \"never\",\n \"default_file\": \"no\",\n \"default_memory\": \"yes\",\n \"always_memory\": 
\"always\"}.get(str(self.options.temporary_store))\n\n def _build_visual(self):\n crypto_dep = self.deps_cpp_info[str(self.options.crypto_library)]\n crypto_incdir = crypto_dep.include_paths[0]\n crypto_libdir = crypto_dep.lib_paths[0]\n libs = map(lambda lib : lib + \".lib\", crypto_dep.libs)\n system_libs = map(lambda lib : lib + \".lib\", crypto_dep.system_libs)\n\n nmake_flags = [\n \"TLIBS=\\\"%s %s\\\"\" % (\" \".join(libs), \" \".join(system_libs)),\n \"LTLIBPATHS=/LIBPATH:%s\" % crypto_libdir,\n \"OPTS=\\\"-I%s -DSQLITE_HAS_CODEC\\\"\" % (crypto_incdir),\n \"NO_TCL=1\",\n \"USE_AMALGAMATION=1\",\n \"OPT_FEATURE_FLAGS=-DSQLCIPHER_CRYPTO_OPENSSL\",\n \"SQLITE_TEMP_STORE=%s\" % self._temp_store_nmake_value,\n \"TCLSH_CMD=%s\" % self.deps_env_info.TCLSH,\n ]\n\n main_target = \"dll\" if self.options.shared else \"sqlcipher.lib\"\n\n if self.settings.compiler.runtime in [\"MD\", \"MDd\"]:\n nmake_flags.append(\"USE_CRT_DLL=1\")\n if self.settings.build_type == \"Debug\":\n nmake_flags.append(\"DEBUG=2\")\n nmake_flags.append(\"FOR_WIN10=1\")\n platforms = {\"x86\": \"x86\", \"x86_64\": \"x64\"}\n nmake_flags.append(\"PLATFORM=%s\" % platforms[self.settings.arch.value])\n vcvars = tools.vcvars_command(self.settings)\n self.run(\"%s && nmake /f Makefile.msc %s %s\" % (vcvars, main_target, \" \".join(nmake_flags)), cwd=self._source_subfolder)\n\n def _build_autotools(self):\n self.run('chmod +x configure', cwd=self._source_subfolder)\n absolute_install_dir = os.path.abspath(os.path.join(\".\", \"install\"))\n absolute_install_dir = absolute_install_dir.replace(\"\\\\\", \"/\")\n autotools_env = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n if self.settings.os == \"Linux\":\n autotools_env.libs.append(\"dl\")\n if not self.options.with_largefile:\n autotools_env.defines.append(\"SQLITE_DISABLE_LFS=1\")\n autotools_env.defines.extend([\"SQLITE_HAS_CODEC\", \"SQLCIPHER_CRYPTO_OPENSSL\"])\n\n # sqlcipher config.sub does not contain android configurations...\n # elf is the most basic `os' for Android\n host = None\n if self.settings.os == \"Android\":\n host = \"%s-linux-elf\" % self._arch_id_str_compiler\n elif self.settings.os == \"Windows\":\n arch = str(self.settings.arch)\n if arch == \"x86\":\n arch = \"i386\"\n host = \"%s-pc-mingw32\" % arch\n elif self.settings.os == \"iOS\":\n host = \"%s-apple-darwin\" % self.settings.arch\n\n configure_args = self._get_configure_args(absolute_install_dir)\n with tools.chdir(self._source_subfolder):\n # Hack, uname -p returns i386, configure guesses x86_64, we must force i386 so that cross-compilation is correctly detected.\n # Otherwise host/build are the same, and configure tries to launch a sample executable, and fails miserably.\n env_vars = autotools_env.vars\n if self.settings.os == \"iOS\":\n build = \"i386-apple-darwin\"\n # same for mingw...\n elif self.settings.os == \"Windows\":\n build = \"x86_64-linux\"\n env_vars[\"config_TARGET_EXEEXT\"] = \".exe\"\n else:\n build = None\n tclsh_cmd = self.deps_env_info.TCLSH\n env_vars[\"TCLSH_CMD\"] = tclsh_cmd.replace(\"\\\\\", \"/\")\n autotools_env.configure(args=configure_args, host=host, build=build, vars=env_vars)\n if self.settings.os == \"Windows\":\n # sqlcipher will create .exe for the build machine, which we defined to Linux...\n tools.replace_in_file(os.path.join(self.build_folder, self._source_subfolder, \"Makefile\"), \"BEXE = .exe\", \"BEXE = \")\n autotools_env.make(args=[\"install\"])\n\n @property\n def _arch_id_str_compiler(self):\n return {\"x86\": 
\"i686\",\n \"armv6\": \"arm\",\n \"armv7\": \"arm\",\n \"armv7hf\": \"arm\",\n # Hack: config.guess of sqlcipher does not like aarch64\n \"armv8\": \"armv8\",\n \"mips64\": \"mips64\"}.get(str(self.settings.arch),\n str(self.settings.arch))\n\n def _get_configure_args(self, absolute_install_dir):\n args = [\n \"--prefix=%s\" % absolute_install_dir,\n\n self._autotools_bool_arg(\"shared\", self.options.shared),\n self._autotools_bool_arg(\"static\", not self.options.shared),\n \"--enable-tempstore=%s\" % self._temp_store_autotools_value,\n \"--disable-tcl\",\n ]\n if self.settings.os == \"Windows\":\n args.extend([\"config_BUILD_EXEEXT='.exe'\", \"config_TARGET_EXEEXT='.exe'\"])\n return args\n\n def _autotools_bool_arg(self, arg_base_name, value):\n prefix = \"--enable-\" if value else \"--disable-\"\n\n return prefix + arg_base_name\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n if self.settings.os == \"Macos\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"), r\"-install_name \\$rpath/\", \"-install_name \")\n\n if self.settings.compiler == \"Visual Studio\":\n self._build_visual()\n else:\n self._build_autotools()\n\n def _package_unix(self):\n self.copy(\"*sqlite3.h\", src=\"install\")\n self.copy(\"*.so*\", dst=\"lib\", src=\"install\", keep_path=False, symlinks=True)\n self.copy(\"*.a\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*.lib\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", src=\"install\", keep_path=False)\n self.copy(\"*.dylib\", dst=\"lib\", src=\"install\", keep_path=False)\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n\n def _package_visual(self):\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n self.copy(\"sqlite3.h\", src=self._source_subfolder, dst=os.path.join(\"include\", \"sqlcipher\"))\n\n def package(self):\n if self.settings.compiler == \"Visual Studio\":\n self._package_visual()\n else:\n self._package_unix()\n\n def package_info(self):\n self.cpp_info.libs = [\"sqlcipher\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"pthread\", \"dl\"])\n self.cpp_info.defines = [\"SQLITE_HAS_CODEC\", 'SQLCIPHER_CRYPTO_OPENSSL', 'SQLITE_TEMP_STORE=%s' % self._temp_store_nmake_value]\n # Allow using #include <sqlite3.h> even with sqlcipher (for libs like sqlpp11-connector-sqlite3)\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"sqlcipher\"))\n", "path": "recipes/sqlcipher/all/conanfile.py"}]}
num_tokens_prompt: 3,517 | num_tokens_diff: 212

problem_id: gh_patches_debug_21139
source: rasdani/github-patches
task_type: git_diff
in_source_id: conan-io__conan-3600
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Listing package content
Maybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package, i.e. the files added through the `package()` method?
TIA
</issue>
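Before the cache helper below, here is a minimal, self-contained sketch of what listing a package's content boils down to on disk. The path follows Conan 1.x's default `~/.conan/data` layout, which is an assumption here, and the reference and package id are placeholders rather than values from the issue.

```python
# Illustrative sketch, not part of Conan itself: walk one binary package folder
# in the local cache and print every packaged artifact relative to its root.
import os

# Assumed default cache layout:
# ~/.conan/data/<name>/<version>/<user>/<channel>/package/<package_id>
package_folder = os.path.expanduser(
    "~/.conan/data/pkg/1.0/user/channel/package/<package_id>"  # hypothetical package
)

for root, _dirs, files in os.walk(package_folder):
    for name in files:
        # Prints entries such as include/foo.h or lib/libfoo.a
        print(os.path.relpath(os.path.join(root, name), package_folder))
```

The `get_path()` helper shown below performs the equivalent lookup through the client cache and returns a sorted listing when the requested path is a directory.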
<code>
[start of conans/client/local_file_getter.py]
1 import os
2
3 from conans.errors import NotFoundException
4 from conans.model.manifest import discarded_file
5 from conans.model.ref import PackageReference
6 from conans.util.files import load
7
8
9 def get_path(client_cache, conan_ref, package_id, path):
10 """
11 :param client_cache: Conan's client cache
12 :param conan_ref: Specified reference in the conan get command
13 :param package_id: Specified package id (can be None)
14 :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)
15 :return: The real path in the local cache for the specified parameters
16 """
17 if package_id is None: # Get the file in the exported files
18 folder = client_cache.export(conan_ref)
19 else:
20 folder = client_cache.package(PackageReference(conan_ref, package_id))
21
22 abs_path = os.path.join(folder, path)
23 if not os.path.exists(abs_path):
24 raise NotFoundException("The specified path doesn't exist")
25 if os.path.isdir(abs_path):
26 return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])
27 else:
28 return load(abs_path)
29
[end of conans/client/local_file_getter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py
--- a/conans/client/local_file_getter.py
+++ b/conans/client/local_file_getter.py
@@ -11,13 +11,15 @@
:param client_cache: Conan's client cache
:param conan_ref: Specified reference in the conan get command
:param package_id: Specified package id (can be None)
- :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)
+ :param path: Path to a file, subfolder of exports (if only ref)
+ or package (if package_id defined)
:return: The real path in the local cache for the specified parameters
"""
if package_id is None: # Get the file in the exported files
folder = client_cache.export(conan_ref)
else:
- folder = client_cache.package(PackageReference(conan_ref, package_id))
+ folder = client_cache.package(PackageReference(conan_ref, package_id),
+ short_paths=None)
abs_path = os.path.join(folder, path)
if not os.path.exists(abs_path):
verification_info:
{"golden_diff": "diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py\n--- a/conans/client/local_file_getter.py\n+++ b/conans/client/local_file_getter.py\n@@ -11,13 +11,15 @@\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n- :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n+ :param path: Path to a file, subfolder of exports (if only ref)\n+ or package (if package_id defined)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n- folder = client_cache.package(PackageReference(conan_ref, package_id))\n+ folder = client_cache.package(PackageReference(conan_ref, package_id),\n+ short_paths=None)\n \n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n", "issue": "Listing package content\nMaybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package (i.e. files added through the package method)?\r\nTIA\r\n\n", "before_files": [{"content": "import os\n\nfrom conans.errors import NotFoundException\nfrom conans.model.manifest import discarded_file\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import load\n\n\ndef get_path(client_cache, conan_ref, package_id, path):\n \"\"\"\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n folder = client_cache.package(PackageReference(conan_ref, package_id))\n\n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n raise NotFoundException(\"The specified path doesn't exist\")\n if os.path.isdir(abs_path):\n return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])\n else:\n return load(abs_path)\n", "path": "conans/client/local_file_getter.py"}]}
num_tokens_prompt: 900 | num_tokens_diff: 270