| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-18.9k | stringlengths 145-5.13k | stringlengths 465-23.6k | int64 556-4.1k | int64 47-1.02k |
gh_patches_debug_10580 | rasdani/github-patches | git_diff | plotly__plotly.py-1307 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
v2 API Retry on 429 status_code if users are throttled?
Right now we [Only retry on request errors with 5XX status code and use exponential backoff](https://github.com/plotly/plotly.py/pull/1146). But should we also retry for 429 status codes which users receive if they go over Plot.ly's throttling limit? Especially since calls to the image servers are now limited to 1/sec.
</issue>
<code>
[start of plotly/api/v2/utils.py]
1 from __future__ import absolute_import
2
3 import requests
4 from requests.compat import json as _json
5 from requests.exceptions import RequestException
6 from retrying import retry
7 from plotly import config, exceptions, version, utils
8 from plotly.api.utils import basic_auth
9
10
11 def make_params(**kwargs):
12 """
13 Helper to create a params dict, skipping undefined entries.
14
15 :returns: (dict) A params dict to pass to `request`.
16
17 """
18 return {k: v for k, v in kwargs.items() if v is not None}
19
20
21 def build_url(resource, id='', route=''):
22 """
23 Create a url for a request on a V2 resource.
24
25 :param (str) resource: E.g., 'files', 'plots', 'grids', etc.
26 :param (str) id: The unique identifier for the resource.
27 :param (str) route: Detail/list route. E.g., 'restore', 'lookup', etc.
28 :return: (str) The url.
29
30 """
31 base = config.get_config()['plotly_api_domain']
32 formatter = {'base': base, 'resource': resource, 'id': id, 'route': route}
33
34 # Add path to base url depending on the input params. Note that `route`
35 # can refer to a 'list' or a 'detail' route. Since it cannot refer to
36 # both at the same time, it's overloaded in this function.
37 if id:
38 if route:
39 url = '{base}/v2/{resource}/{id}/{route}'.format(**formatter)
40 else:
41 url = '{base}/v2/{resource}/{id}'.format(**formatter)
42 else:
43 if route:
44 url = '{base}/v2/{resource}/{route}'.format(**formatter)
45 else:
46 url = '{base}/v2/{resource}'.format(**formatter)
47
48 return url
49
50
51 def validate_response(response):
52 """
53 Raise a helpful PlotlyRequestError for failed requests.
54
55 :param (requests.Response) response: A Response object from an api request.
56 :raises: (PlotlyRequestError) If the request failed for any reason.
57 :returns: (None)
58
59 """
60 if response.ok:
61 return
62
63 content = response.content
64 status_code = response.status_code
65 try:
66 parsed_content = response.json()
67 except ValueError:
68 message = content if content else 'No Content'
69 raise exceptions.PlotlyRequestError(message, status_code, content)
70
71 message = ''
72 if isinstance(parsed_content, dict):
73 errors = parsed_content.get('errors', [])
74 messages = [error.get('message') for error in errors]
75 message = '\n'.join([msg for msg in messages if msg])
76 if not message:
77 message = content if content else 'No Content'
78
79 raise exceptions.PlotlyRequestError(message, status_code, content)
80
81
82 def get_headers():
83 """
84 Using session credentials/config, get headers for a V2 API request.
85
86 Users may have their own proxy layer and so we free up the `authorization`
87 header for this purpose (instead adding the user authorization in a new
88 `plotly-authorization` header). See pull #239.
89
90 :returns: (dict) Headers to add to a requests.request call.
91
92 """
93 creds = config.get_credentials()
94
95 headers = {
96 'plotly-client-platform': 'python {}'.format(version.stable_semver()),
97 'content-type': 'application/json'
98 }
99
100 plotly_auth = basic_auth(creds['username'], creds['api_key'])
101 proxy_auth = basic_auth(creds['proxy_username'], creds['proxy_password'])
102
103 if config.get_config()['plotly_proxy_authorization']:
104 headers['authorization'] = proxy_auth
105 if creds['username'] and creds['api_key']:
106 headers['plotly-authorization'] = plotly_auth
107 else:
108 if creds['username'] and creds['api_key']:
109 headers['authorization'] = plotly_auth
110
111 return headers
112
113
114 def should_retry(exception):
115 if isinstance(exception, exceptions.PlotlyRequestError):
116 if (isinstance(exception.status_code, int) and
117 500 <= exception.status_code < 600):
118 # Retry on 5XX errors.
119 return True
120 elif 'Uh oh, an error occurred' in exception.message:
121 return True
122
123 return False
124
125
126 @retry(wait_exponential_multiplier=1000, wait_exponential_max=16000,
127 stop_max_delay=180000, retry_on_exception=should_retry)
128 def request(method, url, **kwargs):
129 """
130 Central place to make any api v2 api request.
131
132 :param (str) method: The request method ('get', 'put', 'delete', ...).
133 :param (str) url: The full api url to make the request to.
134 :param kwargs: These are passed along (but possibly mutated) to requests.
135 :return: (requests.Response) The response directly from requests.
136
137 """
138 kwargs['headers'] = dict(kwargs.get('headers', {}), **get_headers())
139
140 # Change boolean params to lowercase strings. E.g., `True` --> `'true'`.
141 # Just change the value so that requests handles query string creation.
142 if isinstance(kwargs.get('params'), dict):
143 kwargs['params'] = kwargs['params'].copy()
144 for key in kwargs['params']:
145 if isinstance(kwargs['params'][key], bool):
146 kwargs['params'][key] = _json.dumps(kwargs['params'][key])
147
148 # We have a special json encoding class for non-native objects.
149 if kwargs.get('json') is not None:
150 if kwargs.get('data'):
151 raise exceptions.PlotlyError('Cannot supply data and json kwargs.')
152 kwargs['data'] = _json.dumps(kwargs.pop('json'), sort_keys=True,
153 cls=utils.PlotlyJSONEncoder)
154
155 # The config file determines whether reuqests should *verify*.
156 kwargs['verify'] = config.get_config()['plotly_ssl_verification']
157
158 try:
159 response = requests.request(method, url, **kwargs)
160 except RequestException as e:
161 # The message can be an exception. E.g., MaxRetryError.
162 message = str(getattr(e, 'message', 'No message'))
163 response = getattr(e, 'response', None)
164 status_code = response.status_code if response else None
165 content = response.content if response else 'No content'
166 raise exceptions.PlotlyRequestError(message, status_code, content)
167 validate_response(response)
168 return response
169
[end of plotly/api/v2/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plotly/api/v2/utils.py b/plotly/api/v2/utils.py
--- a/plotly/api/v2/utils.py
+++ b/plotly/api/v2/utils.py
@@ -114,8 +114,8 @@
def should_retry(exception):
if isinstance(exception, exceptions.PlotlyRequestError):
if (isinstance(exception.status_code, int) and
- 500 <= exception.status_code < 600):
- # Retry on 5XX errors.
+ (500 <= exception.status_code < 600 or exception.status_code == 429)):
+ # Retry on 5XX and 429 (image export throttling) errors.
return True
elif 'Uh oh, an error occurred' in exception.message:
return True
|
{"golden_diff": "diff --git a/plotly/api/v2/utils.py b/plotly/api/v2/utils.py\n--- a/plotly/api/v2/utils.py\n+++ b/plotly/api/v2/utils.py\n@@ -114,8 +114,8 @@\n def should_retry(exception):\n if isinstance(exception, exceptions.PlotlyRequestError):\n if (isinstance(exception.status_code, int) and\n- 500 <= exception.status_code < 600):\n- # Retry on 5XX errors.\n+ (500 <= exception.status_code < 600 or exception.status_code == 429)):\n+ # Retry on 5XX and 429 (image export throttling) errors.\n return True\n elif 'Uh oh, an error occurred' in exception.message:\n return True\n", "issue": "v2 API Retry on 429 status_code if users are throttled?\nRight now we [Only retry on request errors with 5XX status code and use exponential backoff](https://github.com/plotly/plotly.py/pull/1146). But should we also retry for 429 status codes which users receive if they go over Plot.ly's throttling limit? Especially since calls to the image servers are now limited to 1/sec.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport requests\nfrom requests.compat import json as _json\nfrom requests.exceptions import RequestException\nfrom retrying import retry\nfrom plotly import config, exceptions, version, utils\nfrom plotly.api.utils import basic_auth\n\n\ndef make_params(**kwargs):\n \"\"\"\n Helper to create a params dict, skipping undefined entries.\n\n :returns: (dict) A params dict to pass to `request`.\n\n \"\"\"\n return {k: v for k, v in kwargs.items() if v is not None}\n\n\ndef build_url(resource, id='', route=''):\n \"\"\"\n Create a url for a request on a V2 resource.\n\n :param (str) resource: E.g., 'files', 'plots', 'grids', etc.\n :param (str) id: The unique identifier for the resource.\n :param (str) route: Detail/list route. E.g., 'restore', 'lookup', etc.\n :return: (str) The url.\n\n \"\"\"\n base = config.get_config()['plotly_api_domain']\n formatter = {'base': base, 'resource': resource, 'id': id, 'route': route}\n\n # Add path to base url depending on the input params. Note that `route`\n # can refer to a 'list' or a 'detail' route. 
Since it cannot refer to\n # both at the same time, it's overloaded in this function.\n if id:\n if route:\n url = '{base}/v2/{resource}/{id}/{route}'.format(**formatter)\n else:\n url = '{base}/v2/{resource}/{id}'.format(**formatter)\n else:\n if route:\n url = '{base}/v2/{resource}/{route}'.format(**formatter)\n else:\n url = '{base}/v2/{resource}'.format(**formatter)\n\n return url\n\n\ndef validate_response(response):\n \"\"\"\n Raise a helpful PlotlyRequestError for failed requests.\n\n :param (requests.Response) response: A Response object from an api request.\n :raises: (PlotlyRequestError) If the request failed for any reason.\n :returns: (None)\n\n \"\"\"\n if response.ok:\n return\n\n content = response.content\n status_code = response.status_code\n try:\n parsed_content = response.json()\n except ValueError:\n message = content if content else 'No Content'\n raise exceptions.PlotlyRequestError(message, status_code, content)\n\n message = ''\n if isinstance(parsed_content, dict):\n errors = parsed_content.get('errors', [])\n messages = [error.get('message') for error in errors]\n message = '\\n'.join([msg for msg in messages if msg])\n if not message:\n message = content if content else 'No Content'\n\n raise exceptions.PlotlyRequestError(message, status_code, content)\n\n\ndef get_headers():\n \"\"\"\n Using session credentials/config, get headers for a V2 API request.\n\n Users may have their own proxy layer and so we free up the `authorization`\n header for this purpose (instead adding the user authorization in a new\n `plotly-authorization` header). See pull #239.\n\n :returns: (dict) Headers to add to a requests.request call.\n\n \"\"\"\n creds = config.get_credentials()\n\n headers = {\n 'plotly-client-platform': 'python {}'.format(version.stable_semver()),\n 'content-type': 'application/json'\n }\n\n plotly_auth = basic_auth(creds['username'], creds['api_key'])\n proxy_auth = basic_auth(creds['proxy_username'], creds['proxy_password'])\n\n if config.get_config()['plotly_proxy_authorization']:\n headers['authorization'] = proxy_auth\n if creds['username'] and creds['api_key']:\n headers['plotly-authorization'] = plotly_auth\n else:\n if creds['username'] and creds['api_key']:\n headers['authorization'] = plotly_auth\n\n return headers\n\n\ndef should_retry(exception):\n if isinstance(exception, exceptions.PlotlyRequestError):\n if (isinstance(exception.status_code, int) and\n 500 <= exception.status_code < 600):\n # Retry on 5XX errors.\n return True\n elif 'Uh oh, an error occurred' in exception.message:\n return True\n\n return False\n\n\n@retry(wait_exponential_multiplier=1000, wait_exponential_max=16000,\n stop_max_delay=180000, retry_on_exception=should_retry)\ndef request(method, url, **kwargs):\n \"\"\"\n Central place to make any api v2 api request.\n\n :param (str) method: The request method ('get', 'put', 'delete', ...).\n :param (str) url: The full api url to make the request to.\n :param kwargs: These are passed along (but possibly mutated) to requests.\n :return: (requests.Response) The response directly from requests.\n\n \"\"\"\n kwargs['headers'] = dict(kwargs.get('headers', {}), **get_headers())\n\n # Change boolean params to lowercase strings. 
E.g., `True` --> `'true'`.\n # Just change the value so that requests handles query string creation.\n if isinstance(kwargs.get('params'), dict):\n kwargs['params'] = kwargs['params'].copy()\n for key in kwargs['params']:\n if isinstance(kwargs['params'][key], bool):\n kwargs['params'][key] = _json.dumps(kwargs['params'][key])\n\n # We have a special json encoding class for non-native objects.\n if kwargs.get('json') is not None:\n if kwargs.get('data'):\n raise exceptions.PlotlyError('Cannot supply data and json kwargs.')\n kwargs['data'] = _json.dumps(kwargs.pop('json'), sort_keys=True,\n cls=utils.PlotlyJSONEncoder)\n\n # The config file determines whether reuqests should *verify*.\n kwargs['verify'] = config.get_config()['plotly_ssl_verification']\n\n try:\n response = requests.request(method, url, **kwargs)\n except RequestException as e:\n # The message can be an exception. E.g., MaxRetryError.\n message = str(getattr(e, 'message', 'No message'))\n response = getattr(e, 'response', None)\n status_code = response.status_code if response else None\n content = response.content if response else 'No content'\n raise exceptions.PlotlyRequestError(message, status_code, content)\n validate_response(response)\n return response\n", "path": "plotly/api/v2/utils.py"}]}
| 2,455 | 182 |
gh_patches_debug_33287 | rasdani/github-patches | git_diff | ranaroussi__yfinance-1093 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issues with Date field
This issue appeared in version 0.1.77. I did no other package or python version updates since then.
Following script used to work on the date field:
`df[df.Date > '2020-01-01']`
But I am now getting following error message: TypeError: '>' not supported between instances of 'Timestamp' and 'str'
When I solve that issue by converting the string to a date (pd.to_datetime), I am getting other issues like: ValueError: Tz-aware datetime.datetime cannot be converted to datetime64 unless utc=True
Somewhat further in my script, there is a pd.merge function based on the Date field received from yfinance. It gives now following error: Cannot compare between dtype('<M8[ns]') and dtype('0')
So I guess something has changed with how the dates are passed through. If yes, do you know how I can strip the received date from all these tz related stuff and just use it as a datetime64? I tried things like .dt.normalize() or .dt.date, but it always seems to give errors.
</issue>
<code>
[start of yfinance/multi.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # yfinance - market data downloader
5 # https://github.com/ranaroussi/yfinance
6 #
7 # Copyright 2017-2019 Ran Aroussi
8 #
9 # Licensed under the Apache License, Version 2.0 (the "License");
10 # you may not use this file except in compliance with the License.
11 # You may obtain a copy of the License at
12 #
13 # http://www.apache.org/licenses/LICENSE-2.0
14 #
15 # Unless required by applicable law or agreed to in writing, software
16 # distributed under the License is distributed on an "AS IS" BASIS,
17 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 # See the License for the specific language governing permissions and
19 # limitations under the License.
20 #
21
22 from __future__ import print_function
23
24 import time as _time
25 import multitasking as _multitasking
26 import pandas as _pd
27
28 from . import Ticker, utils
29 from . import shared
30
31
32 def download(tickers, start=None, end=None, actions=False, threads=True,
33 group_by='column', auto_adjust=False, back_adjust=False, keepna=False,
34 progress=True, period="max", show_errors=True, interval="1d", prepost=False,
35 proxy=None, rounding=False, timeout=None, **kwargs):
36 """Download yahoo tickers
37 :Parameters:
38 tickers : str, list
39 List of tickers to download
40 period : str
41 Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
42 Either Use period parameter or use start and end
43 interval : str
44 Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
45 Intraday data cannot extend last 60 days
46 start: str
47 Download start date string (YYYY-MM-DD) or _datetime.
48 Default is 1900-01-01
49 end: str
50 Download end date string (YYYY-MM-DD) or _datetime.
51 Default is now
52 group_by : str
53 Group by 'ticker' or 'column' (default)
54 prepost : bool
55 Include Pre and Post market data in results?
56 Default is False
57 auto_adjust: bool
58 Adjust all OHLC automatically? Default is False
59 keepna: bool
60 Keep NaN rows returned by Yahoo?
61 Default is False
62 actions: bool
63 Download dividend + stock splits data. Default is False
64 threads: bool / int
65 How many threads to use for mass downloading. Default is True
66 proxy: str
67 Optional. Proxy server URL scheme. Default is None
68 rounding: bool
69 Optional. Round values to 2 decimal places?
70 show_errors: bool
71 Optional. Doesn't print errors if False
72 timeout: None or float
73 If not None stops waiting for a response after given number of
74 seconds. (Can also be a fraction of a second e.g. 0.01)
75 """
76
77 # create ticker list
78 tickers = tickers if isinstance(
79 tickers, (list, set, tuple)) else tickers.replace(',', ' ').split()
80
81 # accept isin as ticker
82 shared._ISINS = {}
83 _tickers_ = []
84 for ticker in tickers:
85 if utils.is_isin(ticker):
86 isin = ticker
87 ticker = utils.get_ticker_by_isin(ticker, proxy)
88 shared._ISINS[ticker] = isin
89 _tickers_.append(ticker)
90
91 tickers = _tickers_
92
93 tickers = list(set([ticker.upper() for ticker in tickers]))
94
95 if progress:
96 shared._PROGRESS_BAR = utils.ProgressBar(len(tickers), 'completed')
97
98 # reset shared._DFS
99 shared._DFS = {}
100 shared._ERRORS = {}
101
102 # download using threads
103 if threads:
104 if threads is True:
105 threads = min([len(tickers), _multitasking.cpu_count() * 2])
106 _multitasking.set_max_threads(threads)
107 for i, ticker in enumerate(tickers):
108 _download_one_threaded(ticker, period=period, interval=interval,
109 start=start, end=end, prepost=prepost,
110 actions=actions, auto_adjust=auto_adjust,
111 back_adjust=back_adjust, keepna=keepna,
112 progress=(progress and i > 0), proxy=proxy,
113 rounding=rounding, timeout=timeout)
114 while len(shared._DFS) < len(tickers):
115 _time.sleep(0.01)
116
117 # download synchronously
118 else:
119 for i, ticker in enumerate(tickers):
120 data = _download_one(ticker, period=period, interval=interval,
121 start=start, end=end, prepost=prepost,
122 actions=actions, auto_adjust=auto_adjust,
123 back_adjust=back_adjust, keepna=keepna, proxy=proxy,
124 rounding=rounding, timeout=timeout)
125 shared._DFS[ticker.upper()] = data
126 if progress:
127 shared._PROGRESS_BAR.animate()
128
129 if progress:
130 shared._PROGRESS_BAR.completed()
131
132 if shared._ERRORS and show_errors:
133 print('\n%.f Failed download%s:' % (
134 len(shared._ERRORS), 's' if len(shared._ERRORS) > 1 else ''))
135 # print(shared._ERRORS)
136 print("\n".join(['- %s: %s' %
137 v for v in list(shared._ERRORS.items())]))
138
139 if len(tickers) == 1:
140 ticker = tickers[0]
141 return shared._DFS[shared._ISINS.get(ticker, ticker)]
142
143 try:
144 data = _pd.concat(shared._DFS.values(), axis=1,
145 keys=shared._DFS.keys())
146 except Exception:
147 _realign_dfs()
148 data = _pd.concat(shared._DFS.values(), axis=1,
149 keys=shared._DFS.keys())
150
151 # switch names back to isins if applicable
152 data.rename(columns=shared._ISINS, inplace=True)
153
154 if group_by == 'column':
155 data.columns = data.columns.swaplevel(0, 1)
156 data.sort_index(level=0, axis=1, inplace=True)
157
158 return data
159
160
161 def _realign_dfs():
162 idx_len = 0
163 idx = None
164
165 for df in shared._DFS.values():
166 if len(df) > idx_len:
167 idx_len = len(df)
168 idx = df.index
169
170 for key in shared._DFS.keys():
171 try:
172 shared._DFS[key] = _pd.DataFrame(
173 index=idx, data=shared._DFS[key]).drop_duplicates()
174 except Exception:
175 shared._DFS[key] = _pd.concat([
176 utils.empty_df(idx), shared._DFS[key].dropna()
177 ], axis=0, sort=True)
178
179 # remove duplicate index
180 shared._DFS[key] = shared._DFS[key].loc[
181 ~shared._DFS[key].index.duplicated(keep='last')]
182
183
184 @_multitasking.task
185 def _download_one_threaded(ticker, start=None, end=None,
186 auto_adjust=False, back_adjust=False,
187 actions=False, progress=True, period="max",
188 interval="1d", prepost=False, proxy=None,
189 keepna=False, rounding=False, timeout=None):
190
191 data = _download_one(ticker, start, end, auto_adjust, back_adjust,
192 actions, period, interval, prepost, proxy, rounding,
193 keepna, timeout)
194 shared._DFS[ticker.upper()] = data
195 if progress:
196 shared._PROGRESS_BAR.animate()
197
198
199 def _download_one(ticker, start=None, end=None,
200 auto_adjust=False, back_adjust=False,
201 actions=False, period="max", interval="1d",
202 prepost=False, proxy=None, rounding=False,
203 keepna=False, timeout=None):
204
205 return Ticker(ticker).history(period=period, interval=interval,
206 start=start, end=end, prepost=prepost,
207 actions=actions, auto_adjust=auto_adjust,
208 back_adjust=back_adjust, proxy=proxy,
209 rounding=rounding, keepna=keepna, many=True,
210 timeout=timeout)
211
[end of yfinance/multi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yfinance/multi.py b/yfinance/multi.py
--- a/yfinance/multi.py
+++ b/yfinance/multi.py
@@ -29,7 +29,7 @@
from . import shared
-def download(tickers, start=None, end=None, actions=False, threads=True,
+def download(tickers, start=None, end=None, actions=False, threads=True, ignore_tz=True,
group_by='column', auto_adjust=False, back_adjust=False, keepna=False,
progress=True, period="max", show_errors=True, interval="1d", prepost=False,
proxy=None, rounding=False, timeout=None, **kwargs):
@@ -63,6 +63,9 @@
Download dividend + stock splits data. Default is False
threads: bool / int
How many threads to use for mass downloading. Default is True
+ ignore_tz: bool
+ When combining from different timezones, ignore that part of datetime.
+ Default is True
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
@@ -140,12 +143,17 @@
ticker = tickers[0]
return shared._DFS[shared._ISINS.get(ticker, ticker)]
+ if ignore_tz:
+ for tkr in shared._DFS.keys():
+ if (shared._DFS[tkr] is not None) and (shared._DFS[tkr].shape[0]>0):
+ shared._DFS[tkr].index = shared._DFS[tkr].index.tz_localize(None)
+
try:
- data = _pd.concat(shared._DFS.values(), axis=1,
+ data = _pd.concat(shared._DFS.values(), axis=1, sort=True,
keys=shared._DFS.keys())
except Exception:
_realign_dfs()
- data = _pd.concat(shared._DFS.values(), axis=1,
+ data = _pd.concat(shared._DFS.values(), axis=1, sort=True,
keys=shared._DFS.keys())
# switch names back to isins if applicable
|
{"golden_diff": "diff --git a/yfinance/multi.py b/yfinance/multi.py\n--- a/yfinance/multi.py\n+++ b/yfinance/multi.py\n@@ -29,7 +29,7 @@\n from . import shared\n \n \n-def download(tickers, start=None, end=None, actions=False, threads=True,\n+def download(tickers, start=None, end=None, actions=False, threads=True, ignore_tz=True, \n group_by='column', auto_adjust=False, back_adjust=False, keepna=False,\n progress=True, period=\"max\", show_errors=True, interval=\"1d\", prepost=False,\n proxy=None, rounding=False, timeout=None, **kwargs):\n@@ -63,6 +63,9 @@\n Download dividend + stock splits data. Default is False\n threads: bool / int\n How many threads to use for mass downloading. Default is True\n+ ignore_tz: bool\n+ When combining from different timezones, ignore that part of datetime.\n+ Default is True\n proxy: str\n Optional. Proxy server URL scheme. Default is None\n rounding: bool\n@@ -140,12 +143,17 @@\n ticker = tickers[0]\n return shared._DFS[shared._ISINS.get(ticker, ticker)]\n \n+ if ignore_tz:\n+ for tkr in shared._DFS.keys():\n+ if (shared._DFS[tkr] is not None) and (shared._DFS[tkr].shape[0]>0):\n+ shared._DFS[tkr].index = shared._DFS[tkr].index.tz_localize(None)\n+\n try:\n- data = _pd.concat(shared._DFS.values(), axis=1,\n+ data = _pd.concat(shared._DFS.values(), axis=1, sort=True,\n keys=shared._DFS.keys())\n except Exception:\n _realign_dfs()\n- data = _pd.concat(shared._DFS.values(), axis=1,\n+ data = _pd.concat(shared._DFS.values(), axis=1, sort=True,\n keys=shared._DFS.keys())\n \n # switch names back to isins if applicable\n", "issue": "Issues with Date field\nThis issue appeared in version 0.1.77. I did no other package or python version updates since then.\r\n\r\nFollowing script used to work on the date field:\r\n`df[df.Date > '2020-01-01']`\r\n\r\nBut I am now getting following error message: TypeError: '>' not supported between instances of 'Timestamp' and 'str'\r\n\r\nWhen I solve that issue by converting the string to a date (pd.to_datetime), I am getting other issues like: ValueError: Tz-aware datetime.datetime cannot be converted to datetime64 unless utc=True\r\n\r\nSomewhat further in my script, there is a pd.merge function based on the Date field received from yfinance. It gives now following error: Cannot compare between dtype('<M8[ns]') and dtype('0')\r\n\r\nSo I guess something has changed with how the dates are passed through. If yes, do you know how I can strip the received date from all these tz related stuff and just use it as a datetime64? I tried things like .dt.normalize() or .dt.date, but it always seems to give errors. \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport time as _time\nimport multitasking as _multitasking\nimport pandas as _pd\n\nfrom . import Ticker, utils\nfrom . 
import shared\n\n\ndef download(tickers, start=None, end=None, actions=False, threads=True,\n group_by='column', auto_adjust=False, back_adjust=False, keepna=False,\n progress=True, period=\"max\", show_errors=True, interval=\"1d\", prepost=False,\n proxy=None, rounding=False, timeout=None, **kwargs):\n \"\"\"Download yahoo tickers\n :Parameters:\n tickers : str, list\n List of tickers to download\n period : str\n Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max\n Either Use period parameter or use start and end\n interval : str\n Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo\n Intraday data cannot extend last 60 days\n start: str\n Download start date string (YYYY-MM-DD) or _datetime.\n Default is 1900-01-01\n end: str\n Download end date string (YYYY-MM-DD) or _datetime.\n Default is now\n group_by : str\n Group by 'ticker' or 'column' (default)\n prepost : bool\n Include Pre and Post market data in results?\n Default is False\n auto_adjust: bool\n Adjust all OHLC automatically? Default is False\n keepna: bool\n Keep NaN rows returned by Yahoo?\n Default is False\n actions: bool\n Download dividend + stock splits data. Default is False\n threads: bool / int\n How many threads to use for mass downloading. Default is True\n proxy: str\n Optional. Proxy server URL scheme. Default is None\n rounding: bool\n Optional. Round values to 2 decimal places?\n show_errors: bool\n Optional. Doesn't print errors if False\n timeout: None or float\n If not None stops waiting for a response after given number of\n seconds. (Can also be a fraction of a second e.g. 0.01)\n \"\"\"\n\n # create ticker list\n tickers = tickers if isinstance(\n tickers, (list, set, tuple)) else tickers.replace(',', ' ').split()\n\n # accept isin as ticker\n shared._ISINS = {}\n _tickers_ = []\n for ticker in tickers:\n if utils.is_isin(ticker):\n isin = ticker\n ticker = utils.get_ticker_by_isin(ticker, proxy)\n shared._ISINS[ticker] = isin\n _tickers_.append(ticker)\n\n tickers = _tickers_\n\n tickers = list(set([ticker.upper() for ticker in tickers]))\n\n if progress:\n shared._PROGRESS_BAR = utils.ProgressBar(len(tickers), 'completed')\n\n # reset shared._DFS\n shared._DFS = {}\n shared._ERRORS = {}\n\n # download using threads\n if threads:\n if threads is True:\n threads = min([len(tickers), _multitasking.cpu_count() * 2])\n _multitasking.set_max_threads(threads)\n for i, ticker in enumerate(tickers):\n _download_one_threaded(ticker, period=period, interval=interval,\n start=start, end=end, prepost=prepost,\n actions=actions, auto_adjust=auto_adjust,\n back_adjust=back_adjust, keepna=keepna,\n progress=(progress and i > 0), proxy=proxy,\n rounding=rounding, timeout=timeout)\n while len(shared._DFS) < len(tickers):\n _time.sleep(0.01)\n\n # download synchronously\n else:\n for i, ticker in enumerate(tickers):\n data = _download_one(ticker, period=period, interval=interval,\n start=start, end=end, prepost=prepost,\n actions=actions, auto_adjust=auto_adjust,\n back_adjust=back_adjust, keepna=keepna, proxy=proxy,\n rounding=rounding, timeout=timeout)\n shared._DFS[ticker.upper()] = data\n if progress:\n shared._PROGRESS_BAR.animate()\n\n if progress:\n shared._PROGRESS_BAR.completed()\n\n if shared._ERRORS and show_errors:\n print('\\n%.f Failed download%s:' % (\n len(shared._ERRORS), 's' if len(shared._ERRORS) > 1 else ''))\n # print(shared._ERRORS)\n print(\"\\n\".join(['- %s: %s' %\n v for v in list(shared._ERRORS.items())]))\n\n if len(tickers) == 1:\n ticker = tickers[0]\n return 
shared._DFS[shared._ISINS.get(ticker, ticker)]\n\n try:\n data = _pd.concat(shared._DFS.values(), axis=1,\n keys=shared._DFS.keys())\n except Exception:\n _realign_dfs()\n data = _pd.concat(shared._DFS.values(), axis=1,\n keys=shared._DFS.keys())\n\n # switch names back to isins if applicable\n data.rename(columns=shared._ISINS, inplace=True)\n\n if group_by == 'column':\n data.columns = data.columns.swaplevel(0, 1)\n data.sort_index(level=0, axis=1, inplace=True)\n\n return data\n\n\ndef _realign_dfs():\n idx_len = 0\n idx = None\n\n for df in shared._DFS.values():\n if len(df) > idx_len:\n idx_len = len(df)\n idx = df.index\n\n for key in shared._DFS.keys():\n try:\n shared._DFS[key] = _pd.DataFrame(\n index=idx, data=shared._DFS[key]).drop_duplicates()\n except Exception:\n shared._DFS[key] = _pd.concat([\n utils.empty_df(idx), shared._DFS[key].dropna()\n ], axis=0, sort=True)\n\n # remove duplicate index\n shared._DFS[key] = shared._DFS[key].loc[\n ~shared._DFS[key].index.duplicated(keep='last')]\n\n\n@_multitasking.task\ndef _download_one_threaded(ticker, start=None, end=None,\n auto_adjust=False, back_adjust=False,\n actions=False, progress=True, period=\"max\",\n interval=\"1d\", prepost=False, proxy=None,\n keepna=False, rounding=False, timeout=None):\n\n data = _download_one(ticker, start, end, auto_adjust, back_adjust,\n actions, period, interval, prepost, proxy, rounding,\n keepna, timeout)\n shared._DFS[ticker.upper()] = data\n if progress:\n shared._PROGRESS_BAR.animate()\n\n\ndef _download_one(ticker, start=None, end=None,\n auto_adjust=False, back_adjust=False,\n actions=False, period=\"max\", interval=\"1d\",\n prepost=False, proxy=None, rounding=False,\n keepna=False, timeout=None):\n\n return Ticker(ticker).history(period=period, interval=interval,\n start=start, end=end, prepost=prepost,\n actions=actions, auto_adjust=auto_adjust,\n back_adjust=back_adjust, proxy=proxy,\n rounding=rounding, keepna=keepna, many=True,\n timeout=timeout)\n", "path": "yfinance/multi.py"}]}
| 3,131 | 466 |
gh_patches_debug_7628 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3074 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add OpenAPI Specification for /schemas/ endpoint and fix the incorrect specs
## Problem
In order to ensure the accuracy of the specs generated by drf-spectacular for /schemas/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.
## Proposed solution
* Implement custom preprocessing hook function to filter out all endpoints except for the /schemas/ endpoint.The hook function selectively retains only the endpoint paths that match the /schemas/
* Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /schemas/ endpoint is considered during the OpenAPI specification generation process.
* Finally, generate the spec file using the `manage.py spectacular --color --file schema.yml` command
</issue>
<code>
[start of config/settings/openapi.py]
1 def custom_preprocessing_hook(endpoints):
2 filtered = []
3 for (path, path_regex, method, callback) in endpoints:
4 # Remove all but DRF API endpoints
5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/"):
6 filtered.append((path, path_regex, method, callback))
7 return filtered
8
9
10 def remove_url_prefix_hook(result, **kwargs):
11 # Remove namespace and version URL prefix from the operation Id of the generated API schema
12 for path, path_info in result['paths'].items():
13 for method, operation in path_info.items():
14 operation_id = operation.get('operationId')
15 if operation_id:
16 if path.startswith('/api/db/v0/'):
17 operation['operationId'] = operation_id.replace('db_v0_', '')
18 elif path.startswith('/api/ui/v0/'):
19 operation['operationId'] = operation_id.replace('ui_v0_', '')
20
21 return result
22
[end of config/settings/openapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -2,7 +2,7 @@
filtered = []
for (path, path_regex, method, callback) in endpoints:
# Remove all but DRF API endpoints
- if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/"):
+ if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/"):
filtered.append((path, path_regex, method, callback))
return filtered
|
{"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /schemas/ endpoint and fix the incorrect specs\n## Problem\r\nIn order to ensure the accuracy of the specs generated by drf-spectacular for /schemas/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page.\r\n\r\n## Proposed solution\r\n* Implement custom preprocessing hook function to filter out all endpoints except for the /schemas/ endpoint.The hook function selectively retains only the endpoint paths that match the /schemas/ \r\n* Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /schemas/ endpoint is considered during the OpenAPI specification generation process.\r\n* Finally, generate the spec file using the `manage.py spectacular --color --file schema.yml` command\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
| 947 | 149 |
gh_patches_debug_5497 | rasdani/github-patches | git_diff | scikit-hep__pyhf-999 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keep tighter version constraints on TensorFlow releases
# Description
As Issue #997 makes it clear that minor releases might as well be major releases for TensorFlow, then it is probably worth keeping tighter version constraints on them and just watching the releases of TensorFlow and TensorFlow Probability to see when we can relax these.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.0',
7 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes
12 'minuit': ['iminuit~=1.4,>=1.4.3'], # Use "name" keyword in MINUIT optimizer
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted(set(['matplotlib']))
23 extras_require['lint'] = sorted(set(['pyflakes', 'black']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'jupyter',
43 'uproot~=3.3',
44 'graphviz',
45 'jsonpatch',
46 ]
47 )
48 )
49 extras_require['docs'] = sorted(
50 set(
51 [
52 'sphinx>=3.1.2',
53 'sphinxcontrib-bibtex',
54 'sphinx-click',
55 'sphinx_rtd_theme',
56 'nbsphinx',
57 'ipywidgets',
58 'sphinx-issues',
59 'sphinx-copybutton>0.2.9',
60 ]
61 )
62 )
63 extras_require['develop'] = sorted(
64 set(
65 extras_require['docs']
66 + extras_require['lint']
67 + extras_require['test']
68 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
69 )
70 )
71 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
72
73
74 setup(
75 extras_require=extras_require,
76 use_scm_version=lambda: {'local_scheme': lambda version: ''},
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,8 +3,8 @@
extras_require = {
'shellcomplete': ['click_completion'],
'tensorflow': [
- 'tensorflow~=2.0',
- 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11
+ 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
+ 'tensorflow-probability~=0.10.0',
],
'torch': ['torch~=1.2'],
'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,8 +3,8 @@\n extras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n- 'tensorflow~=2.0',\n- 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11\n+ 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n+ 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n", "issue": "Keep tighter version constraints on TensorFlow releases\n# Description\r\n\r\nAs Issue #997 makes it clear that minor releases might as well be major releases for TensorFlow, then it is probably worth keeping tighter version constraints on them and just watching the releases of TensorFlow and TensorFlow Probability to see when we can relax these.\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4,>=1.4.3'], # Use \"name\" keyword in MINUIT optimizer\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,323 | 170 |
gh_patches_debug_41578 | rasdani/github-patches | git_diff | PrefectHQ__prefect-1368 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add optional pull to local agent
If the image is present locally we don't want to attempt a pull from a registry
</issue>
<code>
[start of src/prefect/cli/agent.py]
1 import click
2
3 from prefect import config
4 from prefect.utilities.configuration import set_temporary_config
5 from prefect.utilities.serialization import from_qualified_name
6
7 _agents = {
8 "local": "prefect.agent.local.LocalAgent",
9 "kubernetes": "prefect.agent.kubernetes.KubernetesAgent",
10 "nomad": "prefect.agent.nomad.NomadAgent",
11 }
12
13
14 @click.group(hidden=True)
15 def agent():
16 """
17 Manage Prefect agents.
18
19 \b
20 Usage:
21 $ prefect agent [COMMAND]
22
23 \b
24 Arguments:
25 start Start a Prefect agent
26
27 \b
28 Examples:
29 $ prefect agent start
30
31 \b
32 $ prefect agent start kubernetes --token MY_TOKEN
33 """
34 pass
35
36
37 @agent.command(hidden=True)
38 @click.argument("name", default="local")
39 @click.option(
40 "--token", "-t", required=False, help="A Prefect Cloud API token.", hidden=True
41 )
42 def start(name, token):
43 """
44 Start an agent.
45
46 \b
47 Arguments:
48 name TEXT The name of an agent to start (e.g. `local`, `kubernetes`, `nomad`)
49 Defaults to `local`
50
51 \b
52 Options:
53 --token, -t TEXT A Prefect Cloud api token
54 """
55 with set_temporary_config(
56 {"cloud.agent.auth_token": token or config.cloud.agent.auth_token}
57 ):
58 retrieved_agent = _agents.get(name, None)
59
60 if not retrieved_agent:
61 click.secho("{} is not a valid agent".format(name), fg="red")
62 return
63
64 from_qualified_name(retrieved_agent)().start()
65
[end of src/prefect/cli/agent.py]
[start of src/prefect/agent/local/agent.py]
1 import docker
2
3 from prefect import config
4 from prefect.agent import Agent
5 from prefect.environments.storage import Docker
6 from prefect.serialization.storage import StorageSchema
7 from prefect.utilities.graphql import GraphQLResult
8
9
10 class LocalAgent(Agent):
11 """
12 Agent which deploys flow runs locally as Docker containers.
13
14 Args:
15 - base_url (str, optional): URL for a Docker daemon server. Defaults to
16 `unix:///var/run/docker.sock` however other hosts such as
17 `tcp://0.0.0.0:2375` can be provided
18 """
19
20 def __init__(self, base_url: str = None) -> None:
21 super().__init__()
22
23 base_url = base_url or "unix://var/run/docker.sock"
24 self.docker_client = docker.APIClient(base_url=base_url, version="auto")
25
26 # Ping Docker daemon for connection issues
27 try:
28 self.docker_client.ping()
29 except Exception as exc:
30 self.logger.error(
31 "Issue connecting to the Docker daemon. Make sure it is running."
32 )
33 raise exc
34
35 def deploy_flows(self, flow_runs: list) -> None:
36 """
37 Deploy flow runs on your local machine as Docker containers
38
39 Args:
40 - flow_runs (list): A list of GraphQLResult flow run objects
41 """
42 for flow_run in flow_runs:
43
44 storage = StorageSchema().load(flow_run.flow.storage)
45 if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):
46 self.logger.error(
47 "Storage for flow run {} is not of type Docker.".format(flow_run.id)
48 )
49 continue
50
51 env_vars = self.populate_env_vars(flow_run=flow_run)
52
53 # Pull image if it doesn't exist locally
54 self.docker_client.pull(storage.name)
55
56 # Create a container
57 container = self.docker_client.create_container(
58 storage.name, command="prefect execute cloud-flow", environment=env_vars
59 )
60
61 # Start the container
62 self.docker_client.start(container=container.get("Id"))
63
64 def populate_env_vars(self, flow_run: GraphQLResult) -> dict:
65 """
66 Populate metadata and variables in the environment variables for a flow run
67
68 Args:
69 - flow_run (GraphQLResult): A flow run object
70
71 Returns:
72 - dict: a dictionary representing the populated environment variables
73 """
74 return {
75 "PREFECT__CLOUD__API": config.cloud.api,
76 "PREFECT__CLOUD__AUTH_TOKEN": config.cloud.agent.auth_token,
77 "PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id, # type: ignore
78 "PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
79 "PREFECT__LOGGING__LOG_TO_CLOUD": "true",
80 "PREFECT__LOGGING__LEVEL": "DEBUG",
81 "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
82 "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
83 }
84
85
86 if __name__ == "__main__":
87 LocalAgent().start()
88
[end of src/prefect/agent/local/agent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py
--- a/src/prefect/agent/local/agent.py
+++ b/src/prefect/agent/local/agent.py
@@ -1,6 +1,6 @@
import docker
-from prefect import config
+from prefect import config, context
from prefect.agent import Agent
from prefect.environments.storage import Docker
from prefect.serialization.storage import StorageSchema
@@ -15,9 +15,11 @@
- base_url (str, optional): URL for a Docker daemon server. Defaults to
`unix:///var/run/docker.sock` however other hosts such as
`tcp://0.0.0.0:2375` can be provided
+ - no_pull (bool, optional): Flag on whether or not to pull flow images.
+ Defaults to `False` if not provided here or in context.
"""
- def __init__(self, base_url: str = None) -> None:
+ def __init__(self, base_url: str = None, no_pull: bool = None) -> None:
super().__init__()
base_url = base_url or "unix://var/run/docker.sock"
@@ -32,6 +34,10 @@
)
raise exc
+ self.no_pull = no_pull or context.get("no_pull")
+ if self.no_pull is None:
+ self.no_pull = False
+
def deploy_flows(self, flow_runs: list) -> None:
"""
Deploy flow runs on your local machine as Docker containers
@@ -50,8 +56,8 @@
env_vars = self.populate_env_vars(flow_run=flow_run)
- # Pull image if it doesn't exist locally
- self.docker_client.pull(storage.name)
+ if not self.no_pull:
+ self.docker_client.pull(storage.name)
# Create a container
container = self.docker_client.create_container(
diff --git a/src/prefect/cli/agent.py b/src/prefect/cli/agent.py
--- a/src/prefect/cli/agent.py
+++ b/src/prefect/cli/agent.py
@@ -1,6 +1,6 @@
import click
-from prefect import config
+from prefect import config, context
from prefect.utilities.configuration import set_temporary_config
from prefect.utilities.serialization import from_qualified_name
@@ -39,7 +39,8 @@
@click.option(
"--token", "-t", required=False, help="A Prefect Cloud API token.", hidden=True
)
-def start(name, token):
[email protected]("--no-pull", is_flag=True, help="Pull images flag.", hidden=True)
+def start(name, token, no_pull):
"""
Start an agent.
@@ -51,6 +52,8 @@
\b
Options:
--token, -t TEXT A Prefect Cloud api token
+ --no-pull Pull images for a LocalAgent
+ Defaults to pulling if not provided
"""
with set_temporary_config(
{"cloud.agent.auth_token": token or config.cloud.agent.auth_token}
@@ -61,4 +64,5 @@
click.secho("{} is not a valid agent".format(name), fg="red")
return
- from_qualified_name(retrieved_agent)().start()
+ with context(no_pull=no_pull):
+ from_qualified_name(retrieved_agent)().start()
|
{"golden_diff": "diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py\n--- a/src/prefect/agent/local/agent.py\n+++ b/src/prefect/agent/local/agent.py\n@@ -1,6 +1,6 @@\n import docker\n \n-from prefect import config\n+from prefect import config, context\n from prefect.agent import Agent\n from prefect.environments.storage import Docker\n from prefect.serialization.storage import StorageSchema\n@@ -15,9 +15,11 @@\n - base_url (str, optional): URL for a Docker daemon server. Defaults to\n `unix:///var/run/docker.sock` however other hosts such as\n `tcp://0.0.0.0:2375` can be provided\n+ - no_pull (bool, optional): Flag on whether or not to pull flow images.\n+ Defaults to `False` if not provided here or in context.\n \"\"\"\n \n- def __init__(self, base_url: str = None) -> None:\n+ def __init__(self, base_url: str = None, no_pull: bool = None) -> None:\n super().__init__()\n \n base_url = base_url or \"unix://var/run/docker.sock\"\n@@ -32,6 +34,10 @@\n )\n raise exc\n \n+ self.no_pull = no_pull or context.get(\"no_pull\")\n+ if self.no_pull is None:\n+ self.no_pull = False\n+\n def deploy_flows(self, flow_runs: list) -> None:\n \"\"\"\n Deploy flow runs on your local machine as Docker containers\n@@ -50,8 +56,8 @@\n \n env_vars = self.populate_env_vars(flow_run=flow_run)\n \n- # Pull image if it doesn't exist locally\n- self.docker_client.pull(storage.name)\n+ if not self.no_pull:\n+ self.docker_client.pull(storage.name)\n \n # Create a container\n container = self.docker_client.create_container(\ndiff --git a/src/prefect/cli/agent.py b/src/prefect/cli/agent.py\n--- a/src/prefect/cli/agent.py\n+++ b/src/prefect/cli/agent.py\n@@ -1,6 +1,6 @@\n import click\n \n-from prefect import config\n+from prefect import config, context\n from prefect.utilities.configuration import set_temporary_config\n from prefect.utilities.serialization import from_qualified_name\n \n@@ -39,7 +39,8 @@\n @click.option(\n \"--token\", \"-t\", required=False, help=\"A Prefect Cloud API token.\", hidden=True\n )\n-def start(name, token):\[email protected](\"--no-pull\", is_flag=True, help=\"Pull images flag.\", hidden=True)\n+def start(name, token, no_pull):\n \"\"\"\n Start an agent.\n \n@@ -51,6 +52,8 @@\n \\b\n Options:\n --token, -t TEXT A Prefect Cloud api token\n+ --no-pull Pull images for a LocalAgent\n+ Defaults to pulling if not provided\n \"\"\"\n with set_temporary_config(\n {\"cloud.agent.auth_token\": token or config.cloud.agent.auth_token}\n@@ -61,4 +64,5 @@\n click.secho(\"{} is not a valid agent\".format(name), fg=\"red\")\n return\n \n- from_qualified_name(retrieved_agent)().start()\n+ with context(no_pull=no_pull):\n+ from_qualified_name(retrieved_agent)().start()\n", "issue": "Add optional pull to local agent\nIf the image is present locally we don't want to attempt a pull from a registry\n", "before_files": [{"content": "import click\n\nfrom prefect import config\nfrom prefect.utilities.configuration import set_temporary_config\nfrom prefect.utilities.serialization import from_qualified_name\n\n_agents = {\n \"local\": \"prefect.agent.local.LocalAgent\",\n \"kubernetes\": \"prefect.agent.kubernetes.KubernetesAgent\",\n \"nomad\": \"prefect.agent.nomad.NomadAgent\",\n}\n\n\[email protected](hidden=True)\ndef agent():\n \"\"\"\n Manage Prefect agents.\n\n \\b\n Usage:\n $ prefect agent [COMMAND]\n\n \\b\n Arguments:\n start Start a Prefect agent\n\n \\b\n Examples:\n $ prefect agent start\n\n \\b\n $ prefect agent start kubernetes --token MY_TOKEN\n \"\"\"\n pass\n\n\[email 
protected](hidden=True)\[email protected](\"name\", default=\"local\")\[email protected](\n \"--token\", \"-t\", required=False, help=\"A Prefect Cloud API token.\", hidden=True\n)\ndef start(name, token):\n \"\"\"\n Start an agent.\n\n \\b\n Arguments:\n name TEXT The name of an agent to start (e.g. `local`, `kubernetes`, `nomad`)\n Defaults to `local`\n\n \\b\n Options:\n --token, -t TEXT A Prefect Cloud api token\n \"\"\"\n with set_temporary_config(\n {\"cloud.agent.auth_token\": token or config.cloud.agent.auth_token}\n ):\n retrieved_agent = _agents.get(name, None)\n\n if not retrieved_agent:\n click.secho(\"{} is not a valid agent\".format(name), fg=\"red\")\n return\n\n from_qualified_name(retrieved_agent)().start()\n", "path": "src/prefect/cli/agent.py"}, {"content": "import docker\n\nfrom prefect import config\nfrom prefect.agent import Agent\nfrom prefect.environments.storage import Docker\nfrom prefect.serialization.storage import StorageSchema\nfrom prefect.utilities.graphql import GraphQLResult\n\n\nclass LocalAgent(Agent):\n \"\"\"\n Agent which deploys flow runs locally as Docker containers.\n\n Args:\n - base_url (str, optional): URL for a Docker daemon server. Defaults to\n `unix:///var/run/docker.sock` however other hosts such as\n `tcp://0.0.0.0:2375` can be provided\n \"\"\"\n\n def __init__(self, base_url: str = None) -> None:\n super().__init__()\n\n base_url = base_url or \"unix://var/run/docker.sock\"\n self.docker_client = docker.APIClient(base_url=base_url, version=\"auto\")\n\n # Ping Docker daemon for connection issues\n try:\n self.docker_client.ping()\n except Exception as exc:\n self.logger.error(\n \"Issue connecting to the Docker daemon. Make sure it is running.\"\n )\n raise exc\n\n def deploy_flows(self, flow_runs: list) -> None:\n \"\"\"\n Deploy flow runs on your local machine as Docker containers\n\n Args:\n - flow_runs (list): A list of GraphQLResult flow run objects\n \"\"\"\n for flow_run in flow_runs:\n\n storage = StorageSchema().load(flow_run.flow.storage)\n if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):\n self.logger.error(\n \"Storage for flow run {} is not of type Docker.\".format(flow_run.id)\n )\n continue\n\n env_vars = self.populate_env_vars(flow_run=flow_run)\n\n # Pull image if it doesn't exist locally\n self.docker_client.pull(storage.name)\n\n # Create a container\n container = self.docker_client.create_container(\n storage.name, command=\"prefect execute cloud-flow\", environment=env_vars\n )\n\n # Start the container\n self.docker_client.start(container=container.get(\"Id\"))\n\n def populate_env_vars(self, flow_run: GraphQLResult) -> dict:\n \"\"\"\n Populate metadata and variables in the environment variables for a flow run\n\n Args:\n - flow_run (GraphQLResult): A flow run object\n\n Returns:\n - dict: a dictionary representing the populated environment variables\n \"\"\"\n return {\n \"PREFECT__CLOUD__API\": config.cloud.api,\n \"PREFECT__CLOUD__AUTH_TOKEN\": config.cloud.agent.auth_token,\n \"PREFECT__CONTEXT__FLOW_RUN_ID\": flow_run.id, # type: ignore\n \"PREFECT__CLOUD__USE_LOCAL_SECRETS\": \"false\",\n \"PREFECT__LOGGING__LOG_TO_CLOUD\": \"true\",\n \"PREFECT__LOGGING__LEVEL\": \"DEBUG\",\n \"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudFlowRunner\",\n \"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS\": \"prefect.engine.cloud.CloudTaskRunner\",\n }\n\n\nif __name__ == \"__main__\":\n LocalAgent().start()\n", "path": "src/prefect/agent/local/agent.py"}]}
| 1,926 | 760 |
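The golden diff for the row above threads an optional `no_pull` flag from the Prefect CLI through `context` into the local agent, so `docker pull` is only attempted when pulling is allowed. A minimal sketch of that guard, condensed from the patched agent; the class and method names here are illustrative rather than the full Prefect API:

```python
import docker
from prefect import context


class LocalAgentSketch:
    """Illustration only: a local agent that can skip pulling flow images."""

    def __init__(self, base_url: str = "unix://var/run/docker.sock", no_pull: bool = None) -> None:
        self.docker_client = docker.APIClient(base_url=base_url, version="auto")
        # Same fallback chain as the patch: explicit argument, then context, then False.
        self.no_pull = no_pull or context.get("no_pull")
        if self.no_pull is None:
            self.no_pull = False

    def deploy_flow(self, image_name: str) -> None:
        if not self.no_pull:
            # Only reach out to the registry when pulling is enabled.
            self.docker_client.pull(image_name)
        container = self.docker_client.create_container(
            image_name, command="prefect execute cloud-flow"
        )
        self.docker_client.start(container=container.get("Id"))
```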
gh_patches_debug_61788
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-2656
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The import/export between Menorca and Mallorca appears reversed
Our data is showing the export going one way, but our data source is showing the reverse according to one user. See screenshots:


</issue>
<code>
[start of parsers/ES_IB.py]
1 #!/usr/bin/env python3
2
3 import logging
4 from arrow import get
5 from requests import Session
6 from ree import (Formentera, Ibiza,
7 Mallorca, Menorca,
8 BalearicIslands)
9 # package "ree" is used to parse data from www.ree.es // maintained on github by @hectorespert
10
11 from .lib.exceptions import ParserException
12 from .lib.validation import validate, validate_production_diffs
13
14 ## Guess we'll need to figure these out later?! Adapted from ES-CN:
15
16 # Minimum valid zone demand. This is used to eliminate some cases
17 # where generation for one or more modes is obviously missing.
18 FLOORS = {
19 'ES-IB': 0,
20 'ES-IB-FO': 0,
21 'ES-IB-IZ': 0,
22 'ES-IB-MA': 0,
23 'ES-IB-ME': 0,
24 }
25
26
27 def fetch_island_data(zone_key, session):
28 if zone_key == 'ES-IB-FO':
29 formentera_data = Formentera(session, verify=False).get_all()
30 if not formentera_data:
31 raise ParserException(zone_key, "Formentera doesn't respond")
32 else:
33 return formentera_data
34 elif zone_key == 'ES-IB-IZ':
35 ibiza_data = Ibiza(session, verify=False).get_all()
36 if not ibiza_data:
37 raise ParserException(zone_key, "Party is over, Ibiza doesn't respond")
38 else:
39 return ibiza_data
40 elif zone_key == 'ES-IB-MA':
41 mallorca_data = Mallorca(session, verify=False).get_all()
42 if not mallorca_data:
43 raise ParserException(zone_key, "Mallorca doesn't respond")
44 else:
45 return mallorca_data
46 elif zone_key == 'ES-IB-ME':
47 menorca_data = Menorca(session, verify=False).get_all()
48 if not menorca_data:
49 raise ParserException(zone_key, "Menorca doesn't respond")
50 else:
51 return menorca_data
52 elif zone_key == 'ES-IB':
53 balearic_islands = BalearicIslands(session, verify=False).get_all()
54 if not balearic_islands:
55 raise ParserException(zone_key, "Balearic Islands doesn't respond")
56 else:
57 return balearic_islands
58 else:
59 raise ParserException(zone_key, 'Can\'t read this country code {0}'.format(zone_key))
60
61
62 def fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):
63 if target_datetime:
64 raise NotImplementedError('This parser is not yet able to parse past dates')
65
66 ses = session or Session()
67 island_data = fetch_island_data(zone_key, ses)
68 data = []
69 for response in island_data:
70 response_data = {
71 'zoneKey': zone_key,
72 'datetime': get(response.timestamp).datetime,
73 'consumption': response.demand,
74 'source': 'demanda.ree.es'
75 }
76
77 data.append(response_data)
78
79 return data
80
81
82 def fetch_production(zone_key, session=None, target_datetime=None,
83 logger=logging.getLogger(__name__)):
84 if target_datetime:
85 raise NotImplementedError('This parser is not yet able to parse past dates')
86
87 ses = session or Session()
88 island_data = fetch_island_data(zone_key, ses)
89 data = []
90
91 if zone_key == 'ES-IB':
92 expected_range = {'coal': (50,600)}
93 else:
94 expected_range = None
95
96 for response in island_data:
97 if response.production() >= 0:
98 response_data = {
99 'zoneKey': zone_key,
100 'datetime': get(response.timestamp).datetime,
101 'production': {
102 'coal': response.carbon,
103 'gas': round(response.gas + response.combined, 2),
104 'solar': response.solar,
105 'oil': round(response.vapor + response.diesel, 2),
106 'wind': response.wind,
107 'hydro': response.hydraulic,
108 'biomass': response.waste,
109 'nuclear': 0.0,
110 'geothermal': 0.0,
111 'unknown': response.other
112 },
113 'storage': {
114 'hydro': 0.0,
115 'battery': 0.0
116 },
117 'source': 'demanda.ree.es',
118 }
119
120 response_data = validate(response_data, logger,
121 floor=FLOORS[zone_key],
122 expected_range = expected_range)
123
124 if response_data:
125 # append if valid
126 data.append(response_data)
127
128 if len(data) > 1:
129 # granularity is 10 minutes, drops points with change in coal > 100MW
130 data = validate_production_diffs(data, {'coal': 150}, logger)
131
132 return data
133
134
135 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
136
137 if target_datetime:
138 raise NotImplementedError('This parser is not yet able to parse past dates')
139
140 sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
141
142 ses = session or Session()
143
144 if sorted_zone_keys == 'ES->ES-IB':
145 responses = BalearicIslands(ses, verify=False).get_all()
146 if not responses:
147 raise ParserException("ES-IB", "No responses")
148 elif sorted_zone_keys == 'ES->ES-IB-MA' or sorted_zone_keys == 'ES-IB-MA->ES-IB-ME' or sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':
149 responses = Mallorca(ses, verify=False).get_all()
150 if not responses:
151 raise ParserException("ES-IB-MA", "No responses")
152 elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':
153 responses = Formentera(ses, verify=False).get_all()
154 if not responses:
155 raise ParserException("ES-IB-FO", "No responses")
156 else:
157 raise NotImplementedError('This exchange pair is not implemented')
158
159 exchanges = []
160 for response in responses:
161
162 if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':
163 net_flow = response.link['ma_me']
164 elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':
165 net_flow = response.link['ma_ib']
166 elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':
167 net_flow = -1 * response.link['ib_fo']
168 else:
169 net_flow = response.link['pe_ma']
170
171 exchange = {
172 'sortedZoneKeys': sorted_zone_keys,
173 'datetime': get(response.timestamp).datetime,
174 'netFlow': net_flow,
175 'source': 'demanda.ree.es',
176 }
177
178 exchanges.append(exchange)
179
180 return exchanges
181
182
183 if __name__ == '__main__':
184 session = Session
185 print("fetch_consumption(ES-IB)")
186 print(fetch_consumption('ES-IB', session))
187
188 print("fetch_production(ES-IB)")
189 print(fetch_production('ES-IB', session))
190
191 print("fetch_exchange(ES, ES-IB)")
192 print(fetch_exchange('ES', 'ES-IB', session))
193
194 print("fetch_consumption(ES-IB-FO)")
195 print(fetch_consumption('ES-IB-FO'))
196 print("fetch_production(ES-IB-FO)")
197 print(fetch_production('ES-IB-FO'))
198 print("fetch_consumption(ES-IB-IZ)")
199 print(fetch_consumption('ES-IB-IZ'))
200 print("fetch_production(ES-IB-IZ)")
201 print(fetch_production('ES-IB-IZ'))
202 print("fetch_consumption(ES-IB-MA)")
203 print(fetch_consumption('ES-IB-MA'))
204 print("fetch_production(ES-IB-MA)")
205 print(fetch_production('ES-IB-MA'))
206 print("fetch_consumption(ES-IB-ME)")
207 print(fetch_consumption('ES-IB-ME'))
208 print("fetch_production(ES-IB-ME)")
209 print(fetch_production('ES-IB-ME'))
210 print("fetch_exchange(ES, ES-IB-MA)")
211 print(fetch_exchange('ES', 'ES-IB-MA'))
212 print("fetch_exchange(ES-IB-MA, ES-IB-ME)")
213 print(fetch_exchange('ES-IB-MA', 'ES-IB-ME'))
214 print("fetch_exchange(ES-IB-MA, ES-IB-IZ)")
215 print(fetch_exchange('ES-IB-MA', 'ES-IB-IZ'))
216 print("fetch_exchange(ES-IB-IZ, ES-IB-FO)")
217 print(fetch_exchange('ES-IB-IZ', 'ES-IB-FO'))
218
[end of parsers/ES_IB.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/ES_IB.py b/parsers/ES_IB.py
--- a/parsers/ES_IB.py
+++ b/parsers/ES_IB.py
@@ -160,7 +160,7 @@
for response in responses:
if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':
- net_flow = response.link['ma_me']
+ net_flow = -1 * response.link['ma_me']
elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':
net_flow = response.link['ma_ib']
elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':
|
{"golden_diff": "diff --git a/parsers/ES_IB.py b/parsers/ES_IB.py\n--- a/parsers/ES_IB.py\n+++ b/parsers/ES_IB.py\n@@ -160,7 +160,7 @@\n for response in responses:\n \n if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':\n- net_flow = response.link['ma_me']\n+ net_flow = -1 * response.link['ma_me']\n elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n net_flow = response.link['ma_ib']\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n", "issue": "The import/export between Menorca and Mallorca appears reversed\nOur data is showing the export going one way, but our data source is showing the reverse according to one user. See screenshots:\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport logging\nfrom arrow import get\nfrom requests import Session\nfrom ree import (Formentera, Ibiza,\n Mallorca, Menorca,\n BalearicIslands)\n# package \"ree\" is used to parse data from www.ree.es // maintained on github by @hectorespert\n\nfrom .lib.exceptions import ParserException\nfrom .lib.validation import validate, validate_production_diffs\n\n## Guess we'll need to figure these out later?! Adapted from ES-CN:\n\n# Minimum valid zone demand. This is used to eliminate some cases\n# where generation for one or more modes is obviously missing.\nFLOORS = {\n 'ES-IB': 0,\n 'ES-IB-FO': 0,\n 'ES-IB-IZ': 0,\n 'ES-IB-MA': 0,\n 'ES-IB-ME': 0,\n}\n\n\ndef fetch_island_data(zone_key, session):\n if zone_key == 'ES-IB-FO':\n formentera_data = Formentera(session, verify=False).get_all()\n if not formentera_data:\n raise ParserException(zone_key, \"Formentera doesn't respond\")\n else:\n return formentera_data\n elif zone_key == 'ES-IB-IZ':\n ibiza_data = Ibiza(session, verify=False).get_all()\n if not ibiza_data:\n raise ParserException(zone_key, \"Party is over, Ibiza doesn't respond\")\n else:\n return ibiza_data\n elif zone_key == 'ES-IB-MA':\n mallorca_data = Mallorca(session, verify=False).get_all()\n if not mallorca_data:\n raise ParserException(zone_key, \"Mallorca doesn't respond\")\n else:\n return mallorca_data\n elif zone_key == 'ES-IB-ME':\n menorca_data = Menorca(session, verify=False).get_all()\n if not menorca_data:\n raise ParserException(zone_key, \"Menorca doesn't respond\")\n else:\n return menorca_data\n elif zone_key == 'ES-IB':\n balearic_islands = BalearicIslands(session, verify=False).get_all()\n if not balearic_islands:\n raise ParserException(zone_key, \"Balearic Islands doesn't respond\")\n else:\n return balearic_islands\n else:\n raise ParserException(zone_key, 'Can\\'t read this country code {0}'.format(zone_key))\n\n\ndef fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n for response in island_data:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'consumption': response.demand,\n 'source': 'demanda.ree.es'\n }\n\n data.append(response_data)\n\n return data\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n ses = session or Session()\n island_data = fetch_island_data(zone_key, ses)\n data = []\n\n if zone_key == 'ES-IB':\n expected_range = {'coal': (50,600)}\n else:\n expected_range = None\n\n for response in island_data:\n if 
response.production() >= 0:\n response_data = {\n 'zoneKey': zone_key,\n 'datetime': get(response.timestamp).datetime,\n 'production': {\n 'coal': response.carbon,\n 'gas': round(response.gas + response.combined, 2),\n 'solar': response.solar,\n 'oil': round(response.vapor + response.diesel, 2),\n 'wind': response.wind,\n 'hydro': response.hydraulic,\n 'biomass': response.waste,\n 'nuclear': 0.0,\n 'geothermal': 0.0,\n 'unknown': response.other\n },\n 'storage': {\n 'hydro': 0.0,\n 'battery': 0.0\n },\n 'source': 'demanda.ree.es',\n }\n\n response_data = validate(response_data, logger,\n floor=FLOORS[zone_key],\n expected_range = expected_range)\n\n if response_data:\n # append if valid\n data.append(response_data)\n\n if len(data) > 1:\n # granularity is 10 minutes, drops points with change in coal > 100MW\n data = validate_production_diffs(data, {'coal': 150}, logger)\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n ses = session or Session()\n\n if sorted_zone_keys == 'ES->ES-IB':\n responses = BalearicIslands(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB\", \"No responses\")\n elif sorted_zone_keys == 'ES->ES-IB-MA' or sorted_zone_keys == 'ES-IB-MA->ES-IB-ME' or sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n responses = Mallorca(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-MA\", \"No responses\")\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n responses = Formentera(ses, verify=False).get_all()\n if not responses:\n raise ParserException(\"ES-IB-FO\", \"No responses\")\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n exchanges = []\n for response in responses:\n\n if sorted_zone_keys == 'ES-IB-MA->ES-IB-ME':\n net_flow = response.link['ma_me']\n elif sorted_zone_keys == 'ES-IB-IZ->ES-IB-MA':\n net_flow = response.link['ma_ib']\n elif sorted_zone_keys == 'ES-IB-FO->ES-IB-IZ':\n net_flow = -1 * response.link['ib_fo']\n else:\n net_flow = response.link['pe_ma']\n\n exchange = {\n 'sortedZoneKeys': sorted_zone_keys,\n 'datetime': get(response.timestamp).datetime,\n 'netFlow': net_flow,\n 'source': 'demanda.ree.es',\n }\n\n exchanges.append(exchange)\n\n return exchanges\n\n\nif __name__ == '__main__':\n session = Session\n print(\"fetch_consumption(ES-IB)\")\n print(fetch_consumption('ES-IB', session))\n\n print(\"fetch_production(ES-IB)\")\n print(fetch_production('ES-IB', session))\n\n print(\"fetch_exchange(ES, ES-IB)\")\n print(fetch_exchange('ES', 'ES-IB', session))\n\n print(\"fetch_consumption(ES-IB-FO)\")\n print(fetch_consumption('ES-IB-FO'))\n print(\"fetch_production(ES-IB-FO)\")\n print(fetch_production('ES-IB-FO'))\n print(\"fetch_consumption(ES-IB-IZ)\")\n print(fetch_consumption('ES-IB-IZ'))\n print(\"fetch_production(ES-IB-IZ)\")\n print(fetch_production('ES-IB-IZ'))\n print(\"fetch_consumption(ES-IB-MA)\")\n print(fetch_consumption('ES-IB-MA'))\n print(\"fetch_production(ES-IB-MA)\")\n print(fetch_production('ES-IB-MA'))\n print(\"fetch_consumption(ES-IB-ME)\")\n print(fetch_consumption('ES-IB-ME'))\n print(\"fetch_production(ES-IB-ME)\")\n print(fetch_production('ES-IB-ME'))\n print(\"fetch_exchange(ES, ES-IB-MA)\")\n print(fetch_exchange('ES', 'ES-IB-MA'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-ME)\")\n print(fetch_exchange('ES-IB-MA', 
'ES-IB-ME'))\n print(\"fetch_exchange(ES-IB-MA, ES-IB-IZ)\")\n print(fetch_exchange('ES-IB-MA', 'ES-IB-IZ'))\n print(\"fetch_exchange(ES-IB-IZ, ES-IB-FO)\")\n print(fetch_exchange('ES-IB-IZ', 'ES-IB-FO'))\n", "path": "parsers/ES_IB.py"}]}
| 3,172 | 149 |
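The one-line fix above hinges on the exchange sign convention: `netFlow` is reported relative to the alphabetically sorted pair, positive when power flows from the first zone in `sorted_zone_keys` to the second. For `ES-IB-MA->ES-IB-ME` the raw REE `ma_me` link value evidently carries the opposite orientation, so it gets negated, just as `ib_fo` already is. A small helper that collects the per-pair sign handling in one place; this is an illustrative refactoring, not code from the repository:

```python
def oriented_net_flow(sorted_zone_keys: str, link: dict) -> float:
    """Net flow oriented first-zone -> second-zone, per the fix above."""
    if sorted_zone_keys == "ES-IB-MA->ES-IB-ME":
        return -1 * link["ma_me"]   # raw value points the other way, so flip it
    if sorted_zone_keys == "ES-IB-IZ->ES-IB-MA":
        return link["ma_ib"]
    if sorted_zone_keys == "ES-IB-FO->ES-IB-IZ":
        return -1 * link["ib_fo"]
    return link["pe_ma"]            # mainland (ES) exchanges


# Example: a raw ma_me reading of 25 now yields netFlow = -25 for
# ES-IB-MA->ES-IB-ME, i.e. 25 MW flowing from Menorca towards Mallorca.
```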
gh_patches_debug_19364
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-247
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Fix] sys.stderr.fileno() throws an AttributeError exception
When I tried to deliver the playwright tasks via `celery==4.4.7`, I got an `AttributeError` exception at line 50 in `main.py`. 
`except AttributeError:`
I've checked the source from `pytest`, the original code is:
```
except (AttributeError, io.UnsupportedOperation):
# pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
# https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
# This is potentially dangerous, but the best we can do.
return sys.__stderr__.fileno()
```
It works perfectly once I change it back to the original code, so I think we should stay consistent with the original source to improve compatibility.

I also checked the official celery docs. The reason I got an `AttributeError` exception is probably that celery's log proxy does not support the `stderr` attribute.
</issue>
<code>
[start of playwright/main.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import io
17 import os
18 import subprocess
19 import sys
20 from pathlib import Path
21 from typing import Any
22
23 from greenlet import greenlet
24
25 from playwright.async_api import Playwright as AsyncPlaywright
26 from playwright.connection import Connection
27 from playwright.helper import Error
28 from playwright.object_factory import create_remote_object
29 from playwright.path_utils import get_file_dirname
30 from playwright.playwright import Playwright
31 from playwright.sync_api import Playwright as SyncPlaywright
32 from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber
33
34
35 def compute_driver_executable() -> Path:
36 package_path = get_file_dirname()
37 platform = sys.platform
38 if platform == "win32":
39 return package_path / "driver" / "playwright-cli.exe"
40 return package_path / "driver" / "playwright-cli"
41
42
43 async def run_driver_async() -> Connection:
44 driver_executable = compute_driver_executable()
45
46 # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80
47 def _get_stderr_fileno() -> int:
48 try:
49 return sys.stderr.fileno()
50 except io.UnsupportedOperation:
51 # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
52 # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
53 # This is potentially dangerous, but the best we can do.
54 return sys.__stderr__.fileno()
55
56 proc = await asyncio.create_subprocess_exec(
57 str(driver_executable),
58 "run-driver",
59 stdin=asyncio.subprocess.PIPE,
60 stdout=asyncio.subprocess.PIPE,
61 stderr=_get_stderr_fileno(),
62 limit=32768,
63 )
64 assert proc.stdout
65 assert proc.stdin
66 connection = Connection(
67 proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()
68 )
69 return connection
70
71
72 def run_driver() -> Connection:
73 loop = asyncio.get_event_loop()
74 if loop.is_running():
75 raise Error("Can only run one Playwright at a time.")
76 return loop.run_until_complete(run_driver_async())
77
78
79 class SyncPlaywrightContextManager:
80 def __init__(self) -> None:
81 self._connection = run_driver()
82 self._playwright: SyncPlaywright
83
84 def __enter__(self) -> SyncPlaywright:
85 g_self = greenlet.getcurrent()
86
87 def callback_wrapper(playwright_impl: Playwright) -> None:
88 self._playwright = SyncPlaywright(playwright_impl)
89 g_self.switch()
90
91 self._connection.call_on_object_with_known_name("Playwright", callback_wrapper)
92 set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))
93 dispatcher_fiber().switch()
94 playwright = self._playwright
95 playwright.stop = self.__exit__ # type: ignore
96 return playwright
97
98 def start(self) -> SyncPlaywright:
99 return self.__enter__()
100
101 def __exit__(self, *args: Any) -> None:
102 self._connection.stop_sync()
103
104
105 class AsyncPlaywrightContextManager:
106 def __init__(self) -> None:
107 self._connection: Connection
108
109 async def __aenter__(self) -> AsyncPlaywright:
110 self._connection = await run_driver_async()
111 self._connection.run_async()
112 playwright = AsyncPlaywright(
113 await self._connection.wait_for_object_with_known_name("Playwright")
114 )
115 playwright.stop = self.__aexit__ # type: ignore
116 return playwright
117
118 async def start(self) -> AsyncPlaywright:
119 return await self.__aenter__()
120
121 async def __aexit__(self, *args: Any) -> None:
122 self._connection.stop_async()
123
124
125 if sys.platform == "win32":
126 # Use ProactorEventLoop in 3.7, which is default in 3.8
127 loop = asyncio.ProactorEventLoop()
128 asyncio.set_event_loop(loop)
129
130
131 def main() -> None:
132 driver_executable = compute_driver_executable()
133 my_env = os.environ.copy()
134 my_env["PW_CLI_TARGET_LANG"] = "python"
135 subprocess.run([str(driver_executable), *sys.argv[1:]], env=my_env)
136
[end of playwright/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/playwright/main.py b/playwright/main.py
--- a/playwright/main.py
+++ b/playwright/main.py
@@ -43,11 +43,11 @@
async def run_driver_async() -> Connection:
driver_executable = compute_driver_executable()
- # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80
+ # Sourced from: https://github.com/pytest-dev/pytest/blob/824e9cf67abcfc47df25a59bf32ebd8c25fbd02a/src/_pytest/faulthandler.py#L70-L77
def _get_stderr_fileno() -> int:
try:
return sys.stderr.fileno()
- except io.UnsupportedOperation:
+ except (AttributeError, io.UnsupportedOperation):
# pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
# https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
# This is potentially dangerous, but the best we can do.
|
{"golden_diff": "diff --git a/playwright/main.py b/playwright/main.py\n--- a/playwright/main.py\n+++ b/playwright/main.py\n@@ -43,11 +43,11 @@\n async def run_driver_async() -> Connection:\n driver_executable = compute_driver_executable()\n \n- # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80\n+ # Sourced from: https://github.com/pytest-dev/pytest/blob/824e9cf67abcfc47df25a59bf32ebd8c25fbd02a/src/_pytest/faulthandler.py#L70-L77\n def _get_stderr_fileno() -> int:\n try:\n return sys.stderr.fileno()\n- except io.UnsupportedOperation:\n+ except (AttributeError, io.UnsupportedOperation):\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n", "issue": "[Fix] sys.stderr.fileno() throws an AttributeError exception\nWhen I tried to delivered the playwright tasks via `celery==4.4.7`, I got an `AttributeError` exception at line 50 in `main.py`. \r\n\r\n`except AttributeError:`\r\n\r\nI've checked the source from `pytest`, the original code is:\r\n\r\n ```\r\n except (AttributeError, io.UnsupportedOperation):\r\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\r\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\r\n # This is potentially dangerous, but the best we can do.\r\n return sys.__stderr__.fileno()\r\n```\r\n\r\nIt perfectly works if I changed it to the original code, so I think we should be consistent with the original source code to enhance compatibility.\r\n\r\nI also checked the celery official docs. The reason why I got an `AttributeError` exception probably is that the log proxy of celery is not support the `stderr` attribute.\r\n\n[Fix] sys.stderr.fileno() throws an AttributeError exception\nWhen I tried to delivered the playwright tasks via `celery==4.4.7`, I got an `AttributeError` exception at line 50 in `main.py`. \r\n\r\n`except AttributeError:`\r\n\r\nI've checked the source from `pytest`, the original code is:\r\n\r\n ```\r\n except (AttributeError, io.UnsupportedOperation):\r\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\r\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\r\n # This is potentially dangerous, but the best we can do.\r\n return sys.__stderr__.fileno()\r\n```\r\n\r\nIt perfectly works if I changed it to the original code, so I think we should be consistent with the original source code to enhance compatibility.\r\n\r\nI also checked the celery official docs. 
The reason why I got an `AttributeError` exception probably is that the log proxy of celery is not support the `stderr` attribute.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_executable() -> Path:\n package_path = get_file_dirname()\n platform = sys.platform\n if platform == \"win32\":\n return package_path / \"driver\" / \"playwright-cli.exe\"\n return package_path / \"driver\" / \"playwright-cli\"\n\n\nasync def run_driver_async() -> Connection:\n driver_executable = compute_driver_executable()\n\n # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80\n def _get_stderr_fileno() -> int:\n try:\n return sys.stderr.fileno()\n except io.UnsupportedOperation:\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n return sys.__stderr__.fileno()\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n playwright = self._playwright\n playwright.stop = self.__exit__ # type: ignore\n return playwright\n\n def start(self) -> SyncPlaywright:\n return self.__enter__()\n\n def __exit__(self, *args: Any) -> None:\n 
self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n playwright = AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n playwright.stop = self.__aexit__ # type: ignore\n return playwright\n\n async def start(self) -> AsyncPlaywright:\n return await self.__aenter__()\n\n async def __aexit__(self, *args: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n driver_executable = compute_driver_executable()\n my_env = os.environ.copy()\n my_env[\"PW_CLI_TARGET_LANG\"] = \"python\"\n subprocess.run([str(driver_executable), *sys.argv[1:]], env=my_env)\n", "path": "playwright/main.py"}]}
| 2,338 | 291 |
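The patch above simply widens the caught exceptions so that stderr replacements without a usable `fileno()` (celery's log proxy raises `AttributeError` according to the report, pytest-xdist's replacement raises `io.UnsupportedOperation`) fall back to the real `sys.__stderr__`. The helper, shown standalone for clarity and mirroring the patched code:

```python
import io
import sys


def _get_stderr_fileno() -> int:
    """Best-effort file descriptor for stderr."""
    try:
        return sys.stderr.fileno()
    except (AttributeError, io.UnsupportedOperation):
        # sys.stderr may be monkeypatched (celery's log proxy, pytest-xdist)
        # with an object that is not an actual file; use the original stream.
        return sys.__stderr__.fileno()
```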
gh_patches_debug_517
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__models-4963
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
emotion_detection/utils.py code error
utils.py, line 62: seq_len = max_seq_len looks like an indentation mistake; it should be inside the else branch, otherwise seq_len cannot represent the true length of the sequence.
</issue>
<code>
[start of PaddleNLP/emotion_detection/utils.py]
1 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 EmoTect utilities.
16 """
17 from __future__ import absolute_import
18 from __future__ import division
19 from __future__ import print_function
20
21 import io
22 import os
23 import sys
24 import six
25 import random
26
27 import paddle
28 import paddle.fluid as fluid
29 import numpy as np
30
31
32 def init_checkpoint(exe, init_checkpoint_path, main_program):
33 """
34 Init CheckPoint
35 """
36
37 fluid.load(main_program, init_checkpoint_path, exe)
38
39
40 def word2id(word_dict, query):
41 """
42 Convert word sequence into id list
43 """
44 unk_id = len(word_dict)
45 wids = [
46 word_dict[w] if w in word_dict else unk_id
47 for w in query.strip().split(" ")
48 ]
49 return wids
50
51
52 def pad_wid(wids, max_seq_len=128, pad_id=0):
53 """
54 Padding data to max_seq_len
55 """
56 seq_len = len(wids)
57 if seq_len < max_seq_len:
58 for i in range(max_seq_len - seq_len):
59 wids.append(pad_id)
60 else:
61 wids = wids[:max_seq_len]
62 seq_len = max_seq_len
63 return wids, seq_len
64
65
66 def data_reader(file_path, word_dict, num_examples, phrase, epoch, max_seq_len):
67 """
68 Data reader, which convert word sequence into id list
69 """
70 all_data = []
71 with io.open(file_path, "r", encoding='utf8') as fin:
72 for line in fin:
73 if line.startswith("label"):
74 continue
75 if phrase == "infer":
76 cols = line.strip().split("\t")
77 query = cols[-1] if len(cols) != -1 else cols[0]
78 wids = word2id(word_dict, query)
79 wids, seq_len = pad_wid(wids, max_seq_len)
80 all_data.append((wids, seq_len))
81 else:
82 cols = line.strip().split("\t")
83 if len(cols) != 2:
84 sys.stderr.write("[NOTICE] Error Format Line!")
85 continue
86 label = int(cols[0])
87 query = cols[1].strip()
88 wids = word2id(word_dict, query)
89 wids, seq_len = pad_wid(wids, max_seq_len)
90 all_data.append((wids, label, seq_len))
91 num_examples[phrase] = len(all_data)
92
93 if phrase == "infer":
94
95 def reader():
96 """
97 Infer reader function
98 """
99 for wids, seq_len in all_data:
100 yield wids, seq_len
101
102 return reader
103
104 def reader():
105 """
106 Reader function
107 """
108 for idx in range(epoch):
109 if phrase == "train" and 'ce_mode' not in os.environ:
110 random.shuffle(all_data)
111 for wids, label, seq_len in all_data:
112 yield wids, label, seq_len
113
114 return reader
115
116
117 def load_vocab(file_path):
118 """
119 load the given vocabulary
120 """
121 vocab = {}
122 with io.open(file_path, 'r', encoding='utf8') as fin:
123 wid = 0
124 for line in fin:
125 if line.strip() not in vocab:
126 vocab[line.strip()] = wid
127 wid += 1
128 vocab["<unk>"] = len(vocab)
129 return vocab
130
131
132 def print_arguments(args):
133 """
134 print arguments
135 """
136 print('----------- Configuration Arguments -----------')
137 for arg, value in sorted(six.iteritems(vars(args))):
138 print('%s: %s' % (arg, value))
139 print('------------------------------------------------')
140
141
142 def query2ids(vocab_path, query):
143 """
144 Convert query to id list according to the given vocab
145 """
146 vocab = load_vocab(vocab_path)
147 wids = word2id(vocab, query)
148 return wids
149
[end of PaddleNLP/emotion_detection/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/PaddleNLP/emotion_detection/utils.py b/PaddleNLP/emotion_detection/utils.py
--- a/PaddleNLP/emotion_detection/utils.py
+++ b/PaddleNLP/emotion_detection/utils.py
@@ -59,7 +59,7 @@
wids.append(pad_id)
else:
wids = wids[:max_seq_len]
- seq_len = max_seq_len
+ seq_len = max_seq_len
return wids, seq_len
|
{"golden_diff": "diff --git a/PaddleNLP/emotion_detection/utils.py b/PaddleNLP/emotion_detection/utils.py\n--- a/PaddleNLP/emotion_detection/utils.py\n+++ b/PaddleNLP/emotion_detection/utils.py\n@@ -59,7 +59,7 @@\n wids.append(pad_id)\n else:\n wids = wids[:max_seq_len]\n- seq_len = max_seq_len\n+ seq_len = max_seq_len\n return wids, seq_len\n", "issue": "emotion_detection/utils.py\u4ee3\u7801\u9519\u8bef\nutils.py\u7b2c62\u884c\uff1aseq_len = max_seq_len \u662f\u7f29\u8fdb\u9519\u8bef\u5427\uff1f\u5e94\u8be5\u5305\u62ec\u5728else\u5185\uff0c\u5426\u5219seq_len\u4e0d\u80fd\u8868\u793asequence\u7684\u771f\u5b9e\u957f\u5ea6\n", "before_files": [{"content": "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nEmoTect utilities.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\nimport sys\nimport six\nimport random\n\nimport paddle\nimport paddle.fluid as fluid\nimport numpy as np\n\n\ndef init_checkpoint(exe, init_checkpoint_path, main_program):\n \"\"\"\n Init CheckPoint\n \"\"\"\n\n fluid.load(main_program, init_checkpoint_path, exe)\n\n\ndef word2id(word_dict, query):\n \"\"\"\n Convert word sequence into id list\n \"\"\"\n unk_id = len(word_dict)\n wids = [\n word_dict[w] if w in word_dict else unk_id\n for w in query.strip().split(\" \")\n ]\n return wids\n\n\ndef pad_wid(wids, max_seq_len=128, pad_id=0):\n \"\"\"\n Padding data to max_seq_len\n \"\"\"\n seq_len = len(wids)\n if seq_len < max_seq_len:\n for i in range(max_seq_len - seq_len):\n wids.append(pad_id)\n else:\n wids = wids[:max_seq_len]\n seq_len = max_seq_len\n return wids, seq_len\n\n\ndef data_reader(file_path, word_dict, num_examples, phrase, epoch, max_seq_len):\n \"\"\"\n Data reader, which convert word sequence into id list\n \"\"\"\n all_data = []\n with io.open(file_path, \"r\", encoding='utf8') as fin:\n for line in fin:\n if line.startswith(\"label\"):\n continue\n if phrase == \"infer\":\n cols = line.strip().split(\"\\t\")\n query = cols[-1] if len(cols) != -1 else cols[0]\n wids = word2id(word_dict, query)\n wids, seq_len = pad_wid(wids, max_seq_len)\n all_data.append((wids, seq_len))\n else:\n cols = line.strip().split(\"\\t\")\n if len(cols) != 2:\n sys.stderr.write(\"[NOTICE] Error Format Line!\")\n continue\n label = int(cols[0])\n query = cols[1].strip()\n wids = word2id(word_dict, query)\n wids, seq_len = pad_wid(wids, max_seq_len)\n all_data.append((wids, label, seq_len))\n num_examples[phrase] = len(all_data)\n\n if phrase == \"infer\":\n\n def reader():\n \"\"\"\n Infer reader function\n \"\"\"\n for wids, seq_len in all_data:\n yield wids, seq_len\n\n return reader\n\n def reader():\n \"\"\"\n Reader function\n \"\"\"\n for idx in range(epoch):\n if phrase == \"train\" and 'ce_mode' not in os.environ:\n random.shuffle(all_data)\n for wids, label, seq_len in all_data:\n yield wids, label, seq_len\n\n return reader\n\n\ndef 
load_vocab(file_path):\n \"\"\"\n load the given vocabulary\n \"\"\"\n vocab = {}\n with io.open(file_path, 'r', encoding='utf8') as fin:\n wid = 0\n for line in fin:\n if line.strip() not in vocab:\n vocab[line.strip()] = wid\n wid += 1\n vocab[\"<unk>\"] = len(vocab)\n return vocab\n\n\ndef print_arguments(args):\n \"\"\"\n print arguments\n \"\"\"\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(six.iteritems(vars(args))):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\ndef query2ids(vocab_path, query):\n \"\"\"\n Convert query to id list according to the given vocab\n \"\"\"\n vocab = load_vocab(vocab_path)\n wids = word2id(vocab, query)\n return wids\n", "path": "PaddleNLP/emotion_detection/utils.py"}]}
| 1,910 | 107 |
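The re-indentation matters because `seq_len` must keep the original length for short sequences and only be clamped when truncation actually happens; with the old indentation every sample reported `max_seq_len`. A corrected sketch of the helper, a compact variant equivalent to the patched function:

```python
def pad_wid(wids, max_seq_len=128, pad_id=0):
    """Pad (or truncate) wids to max_seq_len and return the effective length."""
    seq_len = len(wids)
    if seq_len < max_seq_len:
        wids = wids + [pad_id] * (max_seq_len - seq_len)  # seq_len stays the true length
    else:
        wids = wids[:max_seq_len]
        seq_len = max_seq_len  # clamped only when the input was truncated
    return wids, seq_len


# Example: pad_wid([3, 7, 9], max_seq_len=5) -> ([3, 7, 9, 0, 0], 3)
```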
gh_patches_debug_338
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-1865
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Read the Docs build failing
https://readthedocs.org/projects/gcloud-python/builds/4108022/
https://readthedocs.org/projects/gcloud-python/builds/4108027/
</issue>
<code>
[start of setup.py]
1 import os
2 import sys
3
4 from setuptools import setup
5 from setuptools import find_packages
6
7 here = os.path.abspath(os.path.dirname(__file__))
8
9
10 with open(os.path.join(here, 'README.rst')) as f:
11 README = f.read()
12
13
14 REQUIREMENTS = [
15 'httplib2 >= 0.9.1',
16 'googleapis-common-protos',
17 'oauth2client >= 2.0.1',
18 'protobuf >= 3.0.0b2, != 3.0.0.b2.post1',
19 'six',
20 ]
21
22 GRPC_EXTRAS = [
23 'grpcio >= 0.14.0',
24 'google-gax >= 0.12.0',
25 'gax-google-pubsub-v1',
26 ]
27
28 if sys.version_info[:2] == (2, 7):
29 REQUIREMENTS.extend(GRPC_EXTRAS)
30
31 setup(
32 name='gcloud',
33 version='0.15.0',
34 description='API Client library for Google Cloud',
35 author='Google Cloud Platform',
36 author_email='[email protected]',
37 long_description=README,
38 scripts=[],
39 url='https://github.com/GoogleCloudPlatform/gcloud-python',
40 packages=find_packages(),
41 license='Apache 2.0',
42 platforms='Posix; MacOS X; Windows',
43 include_package_data=True,
44 zip_safe=False,
45 install_requires=REQUIREMENTS,
46 extras_require={'grpc': GRPC_EXTRAS},
47 classifiers=[
48 'Development Status :: 4 - Beta',
49 'Intended Audience :: Developers',
50 'License :: OSI Approved :: Apache Software License',
51 'Operating System :: OS Independent',
52 'Programming Language :: Python :: 2',
53 'Programming Language :: Python :: 2.6',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.4',
57 'Programming Language :: Python :: 3.5',
58 'Topic :: Internet',
59 ]
60 )
61
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@
'gax-google-pubsub-v1',
]
-if sys.version_info[:2] == (2, 7):
+if sys.version_info[:2] == (2, 7) and 'READTHEDOCS' not in os.environ:
REQUIREMENTS.extend(GRPC_EXTRAS)
setup(
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,7 +25,7 @@\n 'gax-google-pubsub-v1',\n ]\n \n-if sys.version_info[:2] == (2, 7):\n+if sys.version_info[:2] == (2, 7) and 'READTHEDOCS' not in os.environ:\n REQUIREMENTS.extend(GRPC_EXTRAS)\n \n setup(\n", "issue": "Read the Docs build failing\nhttps://readthedocs.org/projects/gcloud-python/builds/4108022/\nhttps://readthedocs.org/projects/gcloud-python/builds/4108027/\n\n", "before_files": [{"content": "import os\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\n\nREQUIREMENTS = [\n 'httplib2 >= 0.9.1',\n 'googleapis-common-protos',\n 'oauth2client >= 2.0.1',\n 'protobuf >= 3.0.0b2, != 3.0.0.b2.post1',\n 'six',\n]\n\nGRPC_EXTRAS = [\n 'grpcio >= 0.14.0',\n 'google-gax >= 0.12.0',\n 'gax-google-pubsub-v1',\n]\n\nif sys.version_info[:2] == (2, 7):\n REQUIREMENTS.extend(GRPC_EXTRAS)\n\nsetup(\n name='gcloud',\n version='0.15.0',\n description='API Client library for Google Cloud',\n author='Google Cloud Platform',\n author_email='[email protected]',\n long_description=README,\n scripts=[],\n url='https://github.com/GoogleCloudPlatform/gcloud-python',\n packages=find_packages(),\n license='Apache 2.0',\n platforms='Posix; MacOS X; Windows',\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={'grpc': GRPC_EXTRAS},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet',\n ]\n)\n", "path": "setup.py"}]}
| 1,135 | 100 |
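The fix keys off the `READTHEDOCS` environment variable that Read the Docs sets in its build containers, so the gRPC extras, which appear to have been breaking the documentation builds, are only added for ordinary Python 2.7 installs. The pattern in isolation, with the requirement lists trimmed for illustration:

```python
import os
import sys

REQUIREMENTS = ["six"]                 # trimmed
GRPC_EXTRAS = ["grpcio >= 0.14.0"]     # trimmed

# Install the heavy gRPC dependencies on Python 2.7, but never inside a
# Read the Docs build, where READTHEDOCS is present in the environment.
if sys.version_info[:2] == (2, 7) and "READTHEDOCS" not in os.environ:
    REQUIREMENTS.extend(GRPC_EXTRAS)
```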
gh_patches_debug_60829
|
rasdani/github-patches
|
git_diff
|
microsoft__AzureTRE-1754
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release version 0.3
## Description
As a TRE developer
I want to release the current code base as version 0.3
So that people can use a more stable version going forward
## Acceptance criteria
- [ ] All core apps are bumped to 0.3
- [ ] All bundles are bumped to 0.3
- [ ] A tag is created
- [ ] A release is created
</issue>
<code>
[start of api_app/_version.py]
1 __version__ = "0.2.28"
2
[end of api_app/_version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/api_app/_version.py b/api_app/_version.py
--- a/api_app/_version.py
+++ b/api_app/_version.py
@@ -1 +1 @@
-__version__ = "0.2.28"
+__version__ = "0.3.0"
|
{"golden_diff": "diff --git a/api_app/_version.py b/api_app/_version.py\n--- a/api_app/_version.py\n+++ b/api_app/_version.py\n@@ -1 +1 @@\n-__version__ = \"0.2.28\"\n+__version__ = \"0.3.0\"\n", "issue": "Release version 0.3\n## Description\r\n\r\nAs a TRE developer \r\nI want to release current code base as version 0.3\r\nSo that people can use a more stable version going forward\r\n\r\n## Acceptance criteria\r\n\r\n- [ ] All core apps are bumped to 0.3\r\n- [ ] All bundles are bumped to 0.3\r\n- [ ] A tag is created\r\n- [ ] A release is created\r\n\n", "before_files": [{"content": "__version__ = \"0.2.28\"\n", "path": "api_app/_version.py"}]}
| 639 | 63 |
gh_patches_debug_26379
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-1572
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix bytes/unicode issue to fix test on Python 3
</issue>
<code>
[start of master/buildbot/db/schedulers.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import sqlalchemy as sa
17 import sqlalchemy.exc
18
19 from buildbot.db import NULL
20 from buildbot.db import base
21 from twisted.internet import defer
22
23
24 class SchedulerAlreadyClaimedError(Exception):
25 pass
26
27
28 class SchedulersConnectorComponent(base.DBConnectorComponent):
29 # Documentation is in developer/db.rst
30
31 def classifyChanges(self, schedulerid, classifications):
32 def thd(conn):
33 transaction = conn.begin()
34 tbl = self.db.model.scheduler_changes
35 ins_q = tbl.insert()
36 upd_q = tbl.update(
37 ((tbl.c.schedulerid == schedulerid)
38 & (tbl.c.changeid == sa.bindparam('wc_changeid'))))
39 for changeid, important in classifications.items():
40 # convert the 'important' value into an integer, since that
41 # is the column type
42 imp_int = important and 1 or 0
43 try:
44 conn.execute(ins_q,
45 schedulerid=schedulerid,
46 changeid=changeid,
47 important=imp_int)
48 except (sqlalchemy.exc.ProgrammingError,
49 sqlalchemy.exc.IntegrityError):
50 transaction.rollback()
51 transaction = conn.begin()
52 # insert failed, so try an update
53 conn.execute(upd_q,
54 wc_changeid=changeid,
55 important=imp_int)
56
57 transaction.commit()
58 return self.db.pool.do(thd)
59
60 def flushChangeClassifications(self, schedulerid, less_than=None):
61 def thd(conn):
62 sch_ch_tbl = self.db.model.scheduler_changes
63 wc = (sch_ch_tbl.c.schedulerid == schedulerid)
64 if less_than is not None:
65 wc = wc & (sch_ch_tbl.c.changeid < less_than)
66 q = sch_ch_tbl.delete(whereclause=wc)
67 conn.execute(q)
68 return self.db.pool.do(thd)
69
70 def getChangeClassifications(self, schedulerid, branch=-1,
71 repository=-1, project=-1,
72 codebase=-1):
73 # -1 here stands for "argument not given", since None has meaning
74 # as a branch
75 def thd(conn):
76 sch_ch_tbl = self.db.model.scheduler_changes
77 ch_tbl = self.db.model.changes
78
79 wc = (sch_ch_tbl.c.schedulerid == schedulerid)
80
81 # may need to filter further based on branch, etc
82 extra_wheres = []
83 if branch != -1:
84 extra_wheres.append(ch_tbl.c.branch == branch)
85 if repository != -1:
86 extra_wheres.append(ch_tbl.c.repository == repository)
87 if project != -1:
88 extra_wheres.append(ch_tbl.c.project == project)
89 if codebase != -1:
90 extra_wheres.append(ch_tbl.c.codebase == codebase)
91
92 # if we need to filter further append those, as well as a join
93 # on changeid (but just once for that one)
94 if extra_wheres:
95 wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)
96 for w in extra_wheres:
97 wc &= w
98
99 q = sa.select(
100 [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],
101 whereclause=wc)
102 return dict([(r.changeid, [False, True][r.important])
103 for r in conn.execute(q)])
104 return self.db.pool.do(thd)
105
106 def findSchedulerId(self, name):
107 tbl = self.db.model.schedulers
108 name_hash = self.hashColumns(name)
109 return self.findSomethingId(
110 tbl=tbl,
111 whereclause=(tbl.c.name_hash == name_hash),
112 insert_values=dict(
113 name=name,
114 name_hash=name_hash,
115 ))
116
117 def setSchedulerMaster(self, schedulerid, masterid):
118 def thd(conn):
119 sch_mst_tbl = self.db.model.scheduler_masters
120
121 # handle the masterid=None case to get it out of the way
122 if masterid is None:
123 q = sch_mst_tbl.delete(
124 whereclause=(sch_mst_tbl.c.schedulerid == schedulerid))
125 conn.execute(q)
126 return
127
128 # try a blind insert..
129 try:
130 q = sch_mst_tbl.insert()
131 conn.execute(q,
132 dict(schedulerid=schedulerid, masterid=masterid))
133 except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
134 # someone already owns this scheduler.
135 raise SchedulerAlreadyClaimedError
136
137 return self.db.pool.do(thd)
138
139 @defer.inlineCallbacks
140 def getScheduler(self, schedulerid):
141 sch = yield self.getSchedulers(_schedulerid=schedulerid)
142 if sch:
143 defer.returnValue(sch[0])
144
145 def getSchedulers(self, active=None, masterid=None, _schedulerid=None):
146 def thd(conn):
147 sch_tbl = self.db.model.schedulers
148 sch_mst_tbl = self.db.model.scheduler_masters
149
150 # handle the trivial case of masterid=xx and active=False
151 if masterid is not None and active is not None and not active:
152 return []
153
154 join = sch_tbl.outerjoin(sch_mst_tbl,
155 (sch_tbl.c.id == sch_mst_tbl.c.schedulerid))
156
157 # if we're given a _schedulerid, select only that row
158 wc = None
159 if _schedulerid:
160 wc = (sch_tbl.c.id == _schedulerid)
161 else:
162 # otherwise, filter with active, if necessary
163 if masterid is not None:
164 wc = (sch_mst_tbl.c.masterid == masterid)
165 elif active:
166 wc = (sch_mst_tbl.c.masterid != NULL)
167 elif active is not None:
168 wc = (sch_mst_tbl.c.masterid == NULL)
169
170 q = sa.select([sch_tbl.c.id, sch_tbl.c.name,
171 sch_mst_tbl.c.masterid],
172 from_obj=join, whereclause=wc)
173
174 return [dict(id=row.id, name=row.name,
175 masterid=row.masterid)
176 for row in conn.execute(q).fetchall()]
177 return self.db.pool.do(thd)
178
[end of master/buildbot/db/schedulers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py
--- a/master/buildbot/db/schedulers.py
+++ b/master/buildbot/db/schedulers.py
@@ -30,13 +30,13 @@
def classifyChanges(self, schedulerid, classifications):
def thd(conn):
- transaction = conn.begin()
tbl = self.db.model.scheduler_changes
ins_q = tbl.insert()
upd_q = tbl.update(
((tbl.c.schedulerid == schedulerid)
& (tbl.c.changeid == sa.bindparam('wc_changeid'))))
for changeid, important in classifications.items():
+ transaction = conn.begin()
# convert the 'important' value into an integer, since that
# is the column type
imp_int = important and 1 or 0
@@ -54,7 +54,7 @@
wc_changeid=changeid,
important=imp_int)
- transaction.commit()
+ transaction.commit()
return self.db.pool.do(thd)
def flushChangeClassifications(self, schedulerid, less_than=None):
|
{"golden_diff": "diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py\n--- a/master/buildbot/db/schedulers.py\n+++ b/master/buildbot/db/schedulers.py\n@@ -30,13 +30,13 @@\n \n def classifyChanges(self, schedulerid, classifications):\n def thd(conn):\n- transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.schedulerid == schedulerid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n+ transaction = conn.begin()\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n@@ -54,7 +54,7 @@\n wc_changeid=changeid,\n important=imp_int)\n \n- transaction.commit()\n+ transaction.commit()\n return self.db.pool.do(thd)\n \n def flushChangeClassifications(self, schedulerid, less_than=None):\n", "issue": "Fix bytes/unicode issue to fix test on Python 3\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\nimport sqlalchemy.exc\n\nfrom buildbot.db import NULL\nfrom buildbot.db import base\nfrom twisted.internet import defer\n\n\nclass SchedulerAlreadyClaimedError(Exception):\n pass\n\n\nclass SchedulersConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/db.rst\n\n def classifyChanges(self, schedulerid, classifications):\n def thd(conn):\n transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.schedulerid == schedulerid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n try:\n conn.execute(ins_q,\n schedulerid=schedulerid,\n changeid=changeid,\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n transaction.rollback()\n transaction = conn.begin()\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n important=imp_int)\n\n transaction.commit()\n return self.db.pool.do(thd)\n\n def flushChangeClassifications(self, schedulerid, less_than=None):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n wc = (sch_ch_tbl.c.schedulerid == schedulerid)\n if less_than is not None:\n wc = wc & (sch_ch_tbl.c.changeid < less_than)\n q = sch_ch_tbl.delete(whereclause=wc)\n conn.execute(q)\n return self.db.pool.do(thd)\n\n def getChangeClassifications(self, schedulerid, branch=-1,\n repository=-1, project=-1,\n codebase=-1):\n # -1 here stands for \"argument not given\", since None has meaning\n # as a branch\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n\n wc = (sch_ch_tbl.c.schedulerid == 
schedulerid)\n\n # may need to filter further based on branch, etc\n extra_wheres = []\n if branch != -1:\n extra_wheres.append(ch_tbl.c.branch == branch)\n if repository != -1:\n extra_wheres.append(ch_tbl.c.repository == repository)\n if project != -1:\n extra_wheres.append(ch_tbl.c.project == project)\n if codebase != -1:\n extra_wheres.append(ch_tbl.c.codebase == codebase)\n\n # if we need to filter further append those, as well as a join\n # on changeid (but just once for that one)\n if extra_wheres:\n wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)\n for w in extra_wheres:\n wc &= w\n\n q = sa.select(\n [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],\n whereclause=wc)\n return dict([(r.changeid, [False, True][r.important])\n for r in conn.execute(q)])\n return self.db.pool.do(thd)\n\n def findSchedulerId(self, name):\n tbl = self.db.model.schedulers\n name_hash = self.hashColumns(name)\n return self.findSomethingId(\n tbl=tbl,\n whereclause=(tbl.c.name_hash == name_hash),\n insert_values=dict(\n name=name,\n name_hash=name_hash,\n ))\n\n def setSchedulerMaster(self, schedulerid, masterid):\n def thd(conn):\n sch_mst_tbl = self.db.model.scheduler_masters\n\n # handle the masterid=None case to get it out of the way\n if masterid is None:\n q = sch_mst_tbl.delete(\n whereclause=(sch_mst_tbl.c.schedulerid == schedulerid))\n conn.execute(q)\n return\n\n # try a blind insert..\n try:\n q = sch_mst_tbl.insert()\n conn.execute(q,\n dict(schedulerid=schedulerid, masterid=masterid))\n except (sa.exc.IntegrityError, sa.exc.ProgrammingError):\n # someone already owns this scheduler.\n raise SchedulerAlreadyClaimedError\n\n return self.db.pool.do(thd)\n\n @defer.inlineCallbacks\n def getScheduler(self, schedulerid):\n sch = yield self.getSchedulers(_schedulerid=schedulerid)\n if sch:\n defer.returnValue(sch[0])\n\n def getSchedulers(self, active=None, masterid=None, _schedulerid=None):\n def thd(conn):\n sch_tbl = self.db.model.schedulers\n sch_mst_tbl = self.db.model.scheduler_masters\n\n # handle the trivial case of masterid=xx and active=False\n if masterid is not None and active is not None and not active:\n return []\n\n join = sch_tbl.outerjoin(sch_mst_tbl,\n (sch_tbl.c.id == sch_mst_tbl.c.schedulerid))\n\n # if we're given a _schedulerid, select only that row\n wc = None\n if _schedulerid:\n wc = (sch_tbl.c.id == _schedulerid)\n else:\n # otherwise, filter with active, if necessary\n if masterid is not None:\n wc = (sch_mst_tbl.c.masterid == masterid)\n elif active:\n wc = (sch_mst_tbl.c.masterid != NULL)\n elif active is not None:\n wc = (sch_mst_tbl.c.masterid == NULL)\n\n q = sa.select([sch_tbl.c.id, sch_tbl.c.name,\n sch_mst_tbl.c.masterid],\n from_obj=join, whereclause=wc)\n\n return [dict(id=row.id, name=row.name,\n masterid=row.masterid)\n for row in conn.execute(q).fetchall()]\n return self.db.pool.do(thd)\n", "path": "master/buildbot/db/schedulers.py"}]}
| 2,438 | 242 |
gh_patches_debug_3399
|
rasdani/github-patches
|
git_diff
|
ansible__molecule-2308
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"devices" support in docker driver
# Issue Type
- Feature request
# Molecule and Ansible details
```
$ ansible --version && molecule --version
ansible 2.8.5
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sylvain/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/sylvain/venv/molecule/lib/python3.6/site-packages/ansible
executable location = /home/sylvain/venv/molecule/bin/ansible
python version = 3.6.8 (default, Aug 20 2019, 17:12:48) [GCC 8.3.0]
molecule, version 2.22
```
Molecule installation method (one of):
- pip
Ansible installation method (one of):
- pip
# Desired Behavior
The docker driver does not support the `devices` option available in the `docker_container` Ansible module, so we can't test roles that use fuse, for example, except by using the `privileged` option, which is quite dangerous. Can you please add support for this option?
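Purely as an illustration of what is being asked for (this YAML is not from molecule's docs; it mirrors the `devices` syntax of the `docker_container` module and assumes the option would be passed through unchanged), a platform entry could look like:

```yaml
platforms:
  - name: instance
    image: centos:7
    capabilities:
      - SYS_ADMIN
    devices:
      - /dev/fuse:/dev/fuse:rwm
```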
</issue>
<code>
[start of molecule/driver/docker.py]
1 # Copyright (c) 2015-2018 Cisco Systems, Inc.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to
5 # deal in the Software without restriction, including without limitation the
6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 # sell copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from __future__ import absolute_import
22
23 import os
24
25 from molecule import logger
26 from molecule.api import Driver
27 from molecule.util import lru_cache
28 from molecule.util import sysexit_with_message
29
30 log = logger.get_logger(__name__)
31
32
33 class Docker(Driver):
34 """
35 The class responsible for managing `Docker`_ containers. `Docker`_ is
36 the default driver used in Molecule.
37
38 Molecule leverages Ansible's `docker_container`_ module, by mapping
39 variables from ``molecule.yml`` into ``create.yml`` and ``destroy.yml``.
40
41 .. _`docker_container`: https://docs.ansible.com/ansible/latest/docker_container_module.html
42 .. _`Docker Security Configuration`: https://docs.docker.com/engine/reference/run/#security-configuration
43 .. _`Docker daemon socket options`: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-socket-option
44
45 .. code-block:: yaml
46
47 driver:
48 name: docker
49 platforms:
50 - name: instance
51 hostname: instance
52 image: image_name:tag
53 dockerfile: Dockerfile.j2
54 pull: True|False
55 pre_build_image: True|False
56 registry:
57 url: registry.example.com
58 credentials:
59 username: $USERNAME
60 password: $PASSWORD
61 email: [email protected]
62 user: root
63 override_command: True|False
64 command: sleep infinity
65 tty: True|False
66 pid_mode: host
67 privileged: True|False
68 security_opts:
69 - seccomp=unconfined
70 volumes:
71 - /sys/fs/cgroup:/sys/fs/cgroup:ro
72 keep_volumes: True|False
73 tmpfs:
74 - /tmp
75 - /run
76 capabilities:
77 - SYS_ADMIN
78 sysctls:
79 net.core.somaxconn: 1024
80 net.ipv4.tcp_syncookies: 0
81 exposed_ports:
82 - 53/udp
83 - 53/tcp
84 published_ports:
85 - 0.0.0.0:8053:53/udp
86 - 0.0.0.0:8053:53/tcp
87 ulimits:
88 - nofile:262144:262144
89 dns_servers:
90 - 8.8.8.8
91 etc_hosts: "{'host1.example.com': '10.3.1.5'}"
92 networks:
93 - name: foo
94 - name: bar
95 network_mode: host
96 purge_networks: true
97 docker_host: tcp://localhost:12376
98 cacert_path: /foo/bar/ca.pem
99 cert_path: /foo/bar/cert.pem
100 key_path: /foo/bar/key.pem
101 tls_verify: true
102 env:
103 FOO: bar
104 restart_policy: on-failure
105 restart_retries: 1
106 buildargs:
107 http_proxy: http://proxy.example.com:8080/
108
109 If specifying the `CMD`_ directive in your ``Dockerfile.j2`` or consuming a
110 built image which declares a ``CMD`` directive, then you must set
111 ``override_command: False``. Otherwise, Molecule takes care to honour the
112 value of the ``command`` key or uses the default of ``bash -c "while true;
113 do sleep 10000; done"`` to run the container until it is provisioned.
114
115 When attempting to utilize a container image with `systemd`_ as your init
116 system inside the container to simulate a real machine, make sure to set
117 the ``privileged``, ``volumes``, ``command``, and ``environment``
118 values. An example using the ``centos:7`` image is below:
119
120 .. note:: Do note that running containers in privileged mode is considerably
121 less secure. For details, please reference `Docker Security
122 Configuration`_
123
124 .. note:: With the environment variable ``DOCKER_HOST`` the user can bind
125 Molecule to a different `Docker`_ socket than the default
126 ``unix:///var/run/docker.sock``. ``tcp``, ``fd`` and ``ssh``
127 socket types can be configured. For details, please reference
128 `Docker daemon socket options`_.
129
130 .. code-block:: yaml
131
132 platforms:
133 - name: instance
134 image: centos:7
135 privileged: true
136 volumes:
137 - "/sys/fs/cgroup:/sys/fs/cgroup:rw"
138 command: "/usr/sbin/init"
139 tty: True
140 environment:
141 container: docker
142
143 .. code-block:: bash
144
145 $ pip install molecule[docker]
146
147 When pulling from a private registry, it is the user's discretion to decide
148 whether to use hard-code strings or environment variables for passing
149 credentials to molecule.
150
151 .. important::
152
153 Hard-coded credentials in ``molecule.yml`` should be avoided, instead use
154 `variable substitution`_.
155
156 Provide a list of files Molecule will preserve, relative to the scenario
157 ephemeral directory, after any ``destroy`` subcommand execution.
158
159 .. code-block:: yaml
160
161 driver:
162 name: docker
163 safe_files:
164 - foo
165
166 .. _`Docker`: https://www.docker.com
167 .. _`systemd`: https://www.freedesktop.org/wiki/Software/systemd/
168 .. _`CMD`: https://docs.docker.com/engine/reference/builder/#cmd
169 """ # noqa
170
171 def __init__(self, config=None):
172 super(Docker, self).__init__(config)
173 self._name = 'docker'
174
175 @property
176 def name(self):
177 return self._name
178
179 @name.setter
180 def name(self, value):
181 self._name = value
182
183 @property
184 def login_cmd_template(self):
185 return (
186 'docker exec '
187 '-e COLUMNS={columns} '
188 '-e LINES={lines} '
189 '-e TERM=bash '
190 '-e TERM=xterm '
191 '-ti {instance} bash'
192 )
193
194 @property
195 def default_safe_files(self):
196 return [os.path.join(self._config.scenario.ephemeral_directory, 'Dockerfile')]
197
198 @property
199 def default_ssh_connection_options(self):
200 return []
201
202 def login_options(self, instance_name):
203 return {'instance': instance_name}
204
205 def ansible_connection_options(self, instance_name):
206 return {'ansible_connection': 'docker'}
207
208 @lru_cache()
209 def sanity_checks(self):
210 """Implement Docker driver sanity checks."""
211
212 log.info("Sanity checks: '{}'".format(self._name))
213
214 try:
215 # ansible >= 2.8
216 from ansible.module_utils.docker.common import HAS_DOCKER_PY
217 except ImportError:
218 # ansible < 2.8
219 from ansible.module_utils.docker_common import HAS_DOCKER_PY
220
221 if not HAS_DOCKER_PY:
222 msg = (
223 'Missing Docker driver dependency. Please '
224 "install via 'molecule[docker]' or refer to "
225 'your INSTALL.rst driver documentation file'
226 )
227 sysexit_with_message(msg)
228
229 try:
230 import docker
231 import requests
232
233 docker_client = docker.from_env()
234 docker_client.ping()
235 except requests.exceptions.ConnectionError:
236 msg = (
237 'Unable to contact the Docker daemon. '
238 'Please refer to https://docs.docker.com/config/daemon/ '
239 'for managing the daemon'
240 )
241 sysexit_with_message(msg)
242
[end of molecule/driver/docker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/molecule/driver/docker.py b/molecule/driver/docker.py
--- a/molecule/driver/docker.py
+++ b/molecule/driver/docker.py
@@ -67,6 +67,8 @@
privileged: True|False
security_opts:
- seccomp=unconfined
+ devices:
+ - /dev/fuse:/dev/fuse:rwm
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
keep_volumes: True|False
|
{"golden_diff": "diff --git a/molecule/driver/docker.py b/molecule/driver/docker.py\n--- a/molecule/driver/docker.py\n+++ b/molecule/driver/docker.py\n@@ -67,6 +67,8 @@\n privileged: True|False\n security_opts:\n - seccomp=unconfined\n+ devices:\n+ - /dev/fuse:/dev/fuse:rwm\n volumes:\n - /sys/fs/cgroup:/sys/fs/cgroup:ro\n keep_volumes: True|False\n", "issue": "\"devices\" support in docker driver\n# Issue Type\r\n\r\n- Feature request\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\n$ ansible --version && molecule --version\r\nansible 2.8.5\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = ['/home/sylvain/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/sylvain/venv/molecule/lib/python3.6/site-packages/ansible\r\n executable location = /home/sylvain/venv/molecule/bin/ansible\r\n python version = 3.6.8 (default, Aug 20 2019, 17:12:48) [GCC 8.3.0]\r\nmolecule, version 2.22\r\n```\r\n\r\nMolecule installation method (one of):\r\n\r\n- pip\r\n\r\nAnsible installation method (one of):\r\n\r\n- pip\r\n\r\n# Desired Behavior\r\n\r\nThe docker driver does not support the `devices` option available in the `docker_container` ansible module, so we can't test roles that are using fuse for example, except by using `privileged` option which is quite dangerous. Can you please support for this option?\r\n\n\"devices\" support in docker driver\n# Issue Type\r\n\r\n- Feature request\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\n$ ansible --version && molecule --version\r\nansible 2.8.5\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = ['/home/sylvain/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/sylvain/venv/molecule/lib/python3.6/site-packages/ansible\r\n executable location = /home/sylvain/venv/molecule/bin/ansible\r\n python version = 3.6.8 (default, Aug 20 2019, 17:12:48) [GCC 8.3.0]\r\nmolecule, version 2.22\r\n```\r\n\r\nMolecule installation method (one of):\r\n\r\n- pip\r\n\r\nAnsible installation method (one of):\r\n\r\n- pip\r\n\r\n# Desired Behavior\r\n\r\nThe docker driver does not support the `devices` option available in the `docker_container` ansible module, so we can't test roles that are using fuse for example, except by using `privileged` option which is quite dangerous. Can you please support for this option?\r\n\n", "before_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import absolute_import\n\nimport os\n\nfrom molecule import logger\nfrom molecule.api import Driver\nfrom molecule.util import lru_cache\nfrom molecule.util import sysexit_with_message\n\nlog = logger.get_logger(__name__)\n\n\nclass Docker(Driver):\n \"\"\"\n The class responsible for managing `Docker`_ containers. `Docker`_ is\n the default driver used in Molecule.\n\n Molecule leverages Ansible's `docker_container`_ module, by mapping\n variables from ``molecule.yml`` into ``create.yml`` and ``destroy.yml``.\n\n .. _`docker_container`: https://docs.ansible.com/ansible/latest/docker_container_module.html\n .. _`Docker Security Configuration`: https://docs.docker.com/engine/reference/run/#security-configuration\n .. _`Docker daemon socket options`: https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-socket-option\n\n .. code-block:: yaml\n\n driver:\n name: docker\n platforms:\n - name: instance\n hostname: instance\n image: image_name:tag\n dockerfile: Dockerfile.j2\n pull: True|False\n pre_build_image: True|False\n registry:\n url: registry.example.com\n credentials:\n username: $USERNAME\n password: $PASSWORD\n email: [email protected]\n user: root\n override_command: True|False\n command: sleep infinity\n tty: True|False\n pid_mode: host\n privileged: True|False\n security_opts:\n - seccomp=unconfined\n volumes:\n - /sys/fs/cgroup:/sys/fs/cgroup:ro\n keep_volumes: True|False\n tmpfs:\n - /tmp\n - /run\n capabilities:\n - SYS_ADMIN\n sysctls:\n net.core.somaxconn: 1024\n net.ipv4.tcp_syncookies: 0\n exposed_ports:\n - 53/udp\n - 53/tcp\n published_ports:\n - 0.0.0.0:8053:53/udp\n - 0.0.0.0:8053:53/tcp\n ulimits:\n - nofile:262144:262144\n dns_servers:\n - 8.8.8.8\n etc_hosts: \"{'host1.example.com': '10.3.1.5'}\"\n networks:\n - name: foo\n - name: bar\n network_mode: host\n purge_networks: true\n docker_host: tcp://localhost:12376\n cacert_path: /foo/bar/ca.pem\n cert_path: /foo/bar/cert.pem\n key_path: /foo/bar/key.pem\n tls_verify: true\n env:\n FOO: bar\n restart_policy: on-failure\n restart_retries: 1\n buildargs:\n http_proxy: http://proxy.example.com:8080/\n\n If specifying the `CMD`_ directive in your ``Dockerfile.j2`` or consuming a\n built image which declares a ``CMD`` directive, then you must set\n ``override_command: False``. Otherwise, Molecule takes care to honour the\n value of the ``command`` key or uses the default of ``bash -c \"while true;\n do sleep 10000; done\"`` to run the container until it is provisioned.\n\n When attempting to utilize a container image with `systemd`_ as your init\n system inside the container to simulate a real machine, make sure to set\n the ``privileged``, ``volumes``, ``command``, and ``environment``\n values. An example using the ``centos:7`` image is below:\n\n .. note:: Do note that running containers in privileged mode is considerably\n less secure. For details, please reference `Docker Security\n Configuration`_\n\n .. note:: With the environment variable ``DOCKER_HOST`` the user can bind\n Molecule to a different `Docker`_ socket than the default\n ``unix:///var/run/docker.sock``. ``tcp``, ``fd`` and ``ssh``\n socket types can be configured. For details, please reference\n `Docker daemon socket options`_.\n\n .. 
code-block:: yaml\n\n platforms:\n - name: instance\n image: centos:7\n privileged: true\n volumes:\n - \"/sys/fs/cgroup:/sys/fs/cgroup:rw\"\n command: \"/usr/sbin/init\"\n tty: True\n environment:\n container: docker\n\n .. code-block:: bash\n\n $ pip install molecule[docker]\n\n When pulling from a private registry, it is the user's discretion to decide\n whether to use hard-code strings or environment variables for passing\n credentials to molecule.\n\n .. important::\n\n Hard-coded credentials in ``molecule.yml`` should be avoided, instead use\n `variable substitution`_.\n\n Provide a list of files Molecule will preserve, relative to the scenario\n ephemeral directory, after any ``destroy`` subcommand execution.\n\n .. code-block:: yaml\n\n driver:\n name: docker\n safe_files:\n - foo\n\n .. _`Docker`: https://www.docker.com\n .. _`systemd`: https://www.freedesktop.org/wiki/Software/systemd/\n .. _`CMD`: https://docs.docker.com/engine/reference/builder/#cmd\n \"\"\" # noqa\n\n def __init__(self, config=None):\n super(Docker, self).__init__(config)\n self._name = 'docker'\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n\n @property\n def login_cmd_template(self):\n return (\n 'docker exec '\n '-e COLUMNS={columns} '\n '-e LINES={lines} '\n '-e TERM=bash '\n '-e TERM=xterm '\n '-ti {instance} bash'\n )\n\n @property\n def default_safe_files(self):\n return [os.path.join(self._config.scenario.ephemeral_directory, 'Dockerfile')]\n\n @property\n def default_ssh_connection_options(self):\n return []\n\n def login_options(self, instance_name):\n return {'instance': instance_name}\n\n def ansible_connection_options(self, instance_name):\n return {'ansible_connection': 'docker'}\n\n @lru_cache()\n def sanity_checks(self):\n \"\"\"Implement Docker driver sanity checks.\"\"\"\n\n log.info(\"Sanity checks: '{}'\".format(self._name))\n\n try:\n # ansible >= 2.8\n from ansible.module_utils.docker.common import HAS_DOCKER_PY\n except ImportError:\n # ansible < 2.8\n from ansible.module_utils.docker_common import HAS_DOCKER_PY\n\n if not HAS_DOCKER_PY:\n msg = (\n 'Missing Docker driver dependency. Please '\n \"install via 'molecule[docker]' or refer to \"\n 'your INSTALL.rst driver documentation file'\n )\n sysexit_with_message(msg)\n\n try:\n import docker\n import requests\n\n docker_client = docker.from_env()\n docker_client.ping()\n except requests.exceptions.ConnectionError:\n msg = (\n 'Unable to contact the Docker daemon. '\n 'Please refer to https://docs.docker.com/config/daemon/ '\n 'for managing the daemon'\n )\n sysexit_with_message(msg)\n", "path": "molecule/driver/docker.py"}]}
| 3,593 | 114 |
gh_patches_debug_10176
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-7364
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
graph --table=file.html does not show outdated packages
They were previously shown in colors. I guess adding an "outdated" column will resolve this issue.
</issue>
<code>
[start of conans/assets/templates/search_table_html.py]
1
2 content = """
3 <!DOCTYPE html>
4 <html lang="en">
5 <head>
6 <title>Conan | {{ search.reference }}</title>
7 <link rel="stylesheet" type="text/css" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css"/>
8 <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.21/css/dataTables.bootstrap.min.css"/>
9 <style>
10 tr td {
11 white-space:nowrap;
12 }
13 </style>
14 </head>
15 <body>
16 <div class="container-fluid">
17 <h1>{{ search.reference }}</h1>
18 <div class="info">
19 <p>
20 Depending on your package_id_mode, any combination of settings, options and requirements
21 can give you a different packageID. Take into account that your configuration might be
22 different from the one used to generate the packages.
23 </p>
24 </div>
25
26 <table id="results" class="table table-striped table-bordered" style="width:100%">
27 <thead>
28 {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}
29 {%- set headers2rows = headers.row(n_rows=2) %}
30 <tr>
31 {%- for category, subheaders in headers2rows %}
32 <th class="text-center" rowspan="{% if subheaders|length == 1 and not subheaders[0] %}2{% else %}1{% endif %}" colspan="{{ subheaders|length }}">
33 {{ category }}
34 </th>
35 {%- endfor %}
36 </tr>
37 <tr>
38 {%- for category, subheaders in headers2rows %}
39 {%- if subheaders|length != 1 or subheaders[0] != '' %}
40 {%- for subheader in subheaders %}
41 <th>{{ subheader|default(category, true) }}</th>
42 {%- endfor %}
43 {%- endif %}
44 {%- endfor %}
45 </tr>
46 </thead>
47 <tbody>
48 {%- for package in results.packages() %}
49 <tr>
50 {%- for item in package.row(headers) %}
51 <td>{{ item if item != None else ''}}</td>
52 {%- endfor %}
53 </tr>
54 {%- endfor %}
55 </tbody>
56 <tfoot>
57 <tr>
58 {%- for header in headers.row(n_rows=1) %}
59 <th>{{ header }}</th>
60 {%- endfor %}
61 </tr>
62 </tfoot>
63 </table>
64
65 <script type="text/javascript" src="https://code.jquery.com/jquery-3.3.1.js"></script>
66 <script type="text/javascript" src="https://cdn.datatables.net/v/dt/dt-1.10.20/datatables.min.js"></script>
67 <script type="text/javascript" src="https://cdn.datatables.net/1.10.21/js/dataTables.bootstrap.min.js"></script>
68 <script>
69 $(document).ready(function() {
70 // Setup - add a text input to each footer cell
71 $('#results tfoot th').each( function () {
72 var title = $(this).text();
73 $(this).html( '<input type="text" class="form-control filter-input" placeholder="Filter '+title+'" style="width:100%"/>' );
74 });
75
76 var table = $('#results').DataTable( {
77 "dom": "lrtip",
78 "lengthMenu": [[10, 25, 50, -1], [10, 25, 50, "All"]],
79 "pageLength": 10,
80 "columnDefs": [
81 { className: "cell_border_right", "targets": [ {{ headers.keys|length + headers.settings|length -1 }}, {{ headers.keys|length + headers.settings|length + headers.options|length -1 }} ] },
82 { className: "cell_border_right monospaced", "targets": [{{ headers.keys|length -1 }}, ]}
83 ]
84 });
85
86 // Apply the search
87 table.columns().every( function () {
88 var that = this;
89
90 $( 'input', this.footer() ).on( 'keyup change clear', function () {
91 if ( that.search() !== this.value ) {
92 that
93 .search( this.value )
94 .draw();
95 }
96 } );
97 } );
98 });
99 </script>
100 </div>
101 </body>
102 </html>
103 """
104
[end of conans/assets/templates/search_table_html.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conans/assets/templates/search_table_html.py b/conans/assets/templates/search_table_html.py
--- a/conans/assets/templates/search_table_html.py
+++ b/conans/assets/templates/search_table_html.py
@@ -25,7 +25,7 @@
<table id="results" class="table table-striped table-bordered" style="width:100%">
<thead>
- {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}
+ {%- set headers = results.get_headers(keys=['remote', 'package_id', 'outdated']) %}
{%- set headers2rows = headers.row(n_rows=2) %}
<tr>
{%- for category, subheaders in headers2rows %}
|
{"golden_diff": "diff --git a/conans/assets/templates/search_table_html.py b/conans/assets/templates/search_table_html.py\n--- a/conans/assets/templates/search_table_html.py\n+++ b/conans/assets/templates/search_table_html.py\n@@ -25,7 +25,7 @@\n \n <table id=\"results\" class=\"table table-striped table-bordered\" style=\"width:100%\">\n <thead>\n- {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}\n+ {%- set headers = results.get_headers(keys=['remote', 'package_id', 'outdated']) %}\n {%- set headers2rows = headers.row(n_rows=2) %}\n <tr>\n {%- for category, subheaders in headers2rows %}\n", "issue": "graph --table=file.html does not show outdated packages\nThey were previously shown in colors. I guess adding a column \"outdated\" will resolve this issue.\n", "before_files": [{"content": "\ncontent = \"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>Conan | {{ search.reference }}</title>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css\"/>\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdn.datatables.net/1.10.21/css/dataTables.bootstrap.min.css\"/>\n <style>\n tr td {\n white-space:nowrap;\n }\n </style>\n </head>\n <body>\n <div class=\"container-fluid\">\n <h1>{{ search.reference }}</h1>\n <div class=\"info\">\n <p>\n Depending on your package_id_mode, any combination of settings, options and requirements\n can give you a different packageID. Take into account that your configuration might be\n different from the one used to generate the packages.\n </p>\n </div>\n\n <table id=\"results\" class=\"table table-striped table-bordered\" style=\"width:100%\">\n <thead>\n {%- set headers = results.get_headers(keys=['remote', 'package_id']) %}\n {%- set headers2rows = headers.row(n_rows=2) %}\n <tr>\n {%- for category, subheaders in headers2rows %}\n <th class=\"text-center\" rowspan=\"{% if subheaders|length == 1 and not subheaders[0] %}2{% else %}1{% endif %}\" colspan=\"{{ subheaders|length }}\">\n {{ category }}\n </th>\n {%- endfor %}\n </tr>\n <tr>\n {%- for category, subheaders in headers2rows %}\n {%- if subheaders|length != 1 or subheaders[0] != '' %}\n {%- for subheader in subheaders %}\n <th>{{ subheader|default(category, true) }}</th>\n {%- endfor %}\n {%- endif %}\n {%- endfor %}\n </tr>\n </thead>\n <tbody>\n {%- for package in results.packages() %}\n <tr>\n {%- for item in package.row(headers) %}\n <td>{{ item if item != None else ''}}</td>\n {%- endfor %}\n </tr>\n {%- endfor %}\n </tbody>\n <tfoot>\n <tr>\n {%- for header in headers.row(n_rows=1) %}\n <th>{{ header }}</th>\n {%- endfor %}\n </tr>\n </tfoot>\n </table>\n\n <script type=\"text/javascript\" src=\"https://code.jquery.com/jquery-3.3.1.js\"></script>\n <script type=\"text/javascript\" src=\"https://cdn.datatables.net/v/dt/dt-1.10.20/datatables.min.js\"></script>\n <script type=\"text/javascript\" src=\"https://cdn.datatables.net/1.10.21/js/dataTables.bootstrap.min.js\"></script>\n <script>\n $(document).ready(function() {\n // Setup - add a text input to each footer cell\n $('#results tfoot th').each( function () {\n var title = $(this).text();\n $(this).html( '<input type=\"text\" class=\"form-control filter-input\" placeholder=\"Filter '+title+'\" style=\"width:100%\"/>' );\n });\n\n var table = $('#results').DataTable( {\n \"dom\": \"lrtip\",\n \"lengthMenu\": [[10, 25, 50, -1], [10, 25, 50, \"All\"]],\n \"pageLength\": 10,\n \"columnDefs\": [\n { className: \"cell_border_right\", \"targets\": [ 
{{ headers.keys|length + headers.settings|length -1 }}, {{ headers.keys|length + headers.settings|length + headers.options|length -1 }} ] },\n { className: \"cell_border_right monospaced\", \"targets\": [{{ headers.keys|length -1 }}, ]}\n ]\n });\n\n // Apply the search\n table.columns().every( function () {\n var that = this;\n\n $( 'input', this.footer() ).on( 'keyup change clear', function () {\n if ( that.search() !== this.value ) {\n that\n .search( this.value )\n .draw();\n }\n } );\n } );\n });\n </script>\n </div>\n </body>\n</html>\n\"\"\"\n", "path": "conans/assets/templates/search_table_html.py"}]}
| 1,741 | 159 |
gh_patches_debug_35269
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-853
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jaccard Index behaviour when num_classes is absent
The [jaccard index function](https://github.com/PyTorchLightning/metrics/blob/8fade87062a7b87c1e6429bbe1c4e0112b3713a5/torchmetrics/functional/classification/jaccard.py#L69) determines the number of classes automatically.
This can lead to surprising results if the user forgets to specify `num_classes`. For example, when computing IoU for semantic segmentation, not every image contains every class, so determining the number of classes automatically will almost always give the wrong number.
Additionally, the [warning](https://github.com/PyTorchLightning/metrics/blob/8fade87062a7b87c1e6429bbe1c4e0112b3713a5/torchmetrics/utilities/data.py#L156) that shows up makes it seem like something is wrong, when the user specifies the number of classes explicitly (but this is not the case at all):
```python
/run/determined/pythonuserbase/lib/python3.7/site-packages/torchmetrics/utilities/prints.py:36: RuntimeWarning: You have set 11 number of classes which is different from predicted (1000) and target (10) number of classes
```
Would it be crazy if requiring `num_classes` were the default, and the automatic determination of classes were a special option that must be explicitly enabled?
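
For illustration only (this snippet is not part of the original report; the class counts and tensor shapes are made up), a minimal sketch of how the silent inference goes wrong when a batch of segmentation masks happens to miss some classes:

```python
import torch
from torchmetrics.functional import jaccard_index

# The dataset has 11 classes, but this particular batch only contains labels 0-3.
preds = torch.randint(0, 4, (2, 25, 25))
target = torch.randint(0, 4, (2, 25, 25))

# num_classes is inferred from the data (4 here, not 11), so per-class IoU is wrong.
jaccard_index(preds, target)

# Passing num_classes explicitly gives the intended result, but (as reported above)
# may still emit the "different number of classes" RuntimeWarning.
jaccard_index(preds, target, num_classes=11)
```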
</issue>
<code>
[start of torchmetrics/functional/classification/jaccard.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.functional.classification.confusion_matrix import _confusion_matrix_update
20 from torchmetrics.utilities.data import get_num_classes
21 from torchmetrics.utilities.distributed import reduce
22
23
24 def _jaccard_from_confmat(
25 confmat: Tensor,
26 num_classes: int,
27 ignore_index: Optional[int] = None,
28 absent_score: float = 0.0,
29 reduction: str = "elementwise_mean",
30 ) -> Tensor:
31 """Computes the intersection over union from confusion matrix.
32
33 Args:
34 confmat: Confusion matrix without normalization
35 num_classes: Number of classes for a given prediction and target tensor
36 ignore_index: optional int specifying a target class to ignore. If given, this class index does not contribute
37 to the returned score, regardless of reduction method.
38 absent_score: score to use for an individual class, if no instances of the class index were present in `pred`
39 AND no instances of the class index were present in `target`.
40 reduction: a method to reduce metric score over labels.
41
42 - ``'elementwise_mean'``: takes the mean (default)
43 - ``'sum'``: takes the sum
44 - ``'none'``: no reduction will be applied
45 """
46
47 # Remove the ignored class index from the scores.
48 if ignore_index is not None and 0 <= ignore_index < num_classes:
49 confmat[ignore_index] = 0.0
50
51 intersection = torch.diag(confmat)
52 union = confmat.sum(0) + confmat.sum(1) - intersection
53
54 # If this class is absent in both target AND pred (union == 0), then use the absent_score for this class.
55 scores = intersection.float() / union.float()
56 scores[union == 0] = absent_score
57
58 if ignore_index is not None and 0 <= ignore_index < num_classes:
59 scores = torch.cat(
60 [
61 scores[:ignore_index],
62 scores[ignore_index + 1 :],
63 ]
64 )
65
66 return reduce(scores, reduction=reduction)
67
68
69 def jaccard_index(
70 preds: Tensor,
71 target: Tensor,
72 ignore_index: Optional[int] = None,
73 absent_score: float = 0.0,
74 threshold: float = 0.5,
75 num_classes: Optional[int] = None,
76 reduction: str = "elementwise_mean",
77 ) -> Tensor:
78 r"""
79 Computes `Jaccard index`_
80
81 .. math:: J(A,B) = \frac{|A\cap B|}{|A\cup B|}
82
83 Where: :math:`A` and :math:`B` are both tensors of the same size,
84 containing integer class values. They may be subject to conversion from
85 input data (see description below).
86
87 Note that it is different from box IoU.
88
89 If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
90 to convert into integer labels. This is the case for binary and multi-label probabilities.
91
92 If pred has an extra dimension as in the case of multi-class scores we
93 perform an argmax on ``dim=1``.
94
95 Args:
96 preds: tensor containing predictions from model (probabilities, or labels) with shape ``[N, d1, d2, ...]``
97 target: tensor containing ground truth labels with shape ``[N, d1, d2, ...]``
98 ignore_index: optional int specifying a target class to ignore. If given,
99 this class index does not contribute to the returned score, regardless
100 of reduction method. Has no effect if given an int that is not in the
101 range [0, num_classes-1], where num_classes is either given or derived
102 from pred and target. By default, no index is ignored, and all classes are used.
103 absent_score: score to use for an individual class, if no instances of
104 the class index were present in `pred` AND no instances of the class
105 index were present in `target`. For example, if we have 3 classes,
106 [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be
107 assigned the `absent_score`.
108 threshold:
109 Threshold value for binary or multi-label probabilities.
110 num_classes:
111 Optionally specify the number of classes
112 reduction: a method to reduce metric score over labels.
113
114 - ``'elementwise_mean'``: takes the mean (default)
115 - ``'sum'``: takes the sum
116 - ``'none'``: no reduction will be applied
117
118 Return:
119 IoU score: Tensor containing single value if reduction is
120 'elementwise_mean', or number of classes if reduction is 'none'
121
122 Example:
123 >>> from torchmetrics.functional import jaccard_index
124 >>> target = torch.randint(0, 2, (10, 25, 25))
125 >>> pred = torch.tensor(target)
126 >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
127 >>> jaccard_index(pred, target)
128 tensor(0.9660)
129 """
130
131 num_classes = get_num_classes(preds=preds, target=target, num_classes=num_classes)
132 confmat = _confusion_matrix_update(preds, target, num_classes, threshold)
133 return _jaccard_from_confmat(confmat, num_classes, ignore_index, absent_score, reduction)
134
[end of torchmetrics/functional/classification/jaccard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchmetrics/functional/classification/jaccard.py b/torchmetrics/functional/classification/jaccard.py
--- a/torchmetrics/functional/classification/jaccard.py
+++ b/torchmetrics/functional/classification/jaccard.py
@@ -69,10 +69,10 @@
def jaccard_index(
preds: Tensor,
target: Tensor,
+ num_classes: int,
ignore_index: Optional[int] = None,
absent_score: float = 0.0,
threshold: float = 0.5,
- num_classes: Optional[int] = None,
reduction: str = "elementwise_mean",
) -> Tensor:
r"""
@@ -95,6 +95,7 @@
Args:
preds: tensor containing predictions from model (probabilities, or labels) with shape ``[N, d1, d2, ...]``
target: tensor containing ground truth labels with shape ``[N, d1, d2, ...]``
+ num_classes: Specify the number of classes
ignore_index: optional int specifying a target class to ignore. If given,
this class index does not contribute to the returned score, regardless
of reduction method. Has no effect if given an int that is not in the
@@ -107,8 +108,7 @@
assigned the `absent_score`.
threshold:
Threshold value for binary or multi-label probabilities.
- num_classes:
- Optionally specify the number of classes
+
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
@@ -124,7 +124,7 @@
>>> target = torch.randint(0, 2, (10, 25, 25))
>>> pred = torch.tensor(target)
>>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]
- >>> jaccard_index(pred, target)
+ >>> jaccard_index(pred, target, num_classes=2)
tensor(0.9660)
"""
|
{"golden_diff": "diff --git a/torchmetrics/functional/classification/jaccard.py b/torchmetrics/functional/classification/jaccard.py\n--- a/torchmetrics/functional/classification/jaccard.py\n+++ b/torchmetrics/functional/classification/jaccard.py\n@@ -69,10 +69,10 @@\n def jaccard_index(\n preds: Tensor,\n target: Tensor,\n+ num_classes: int,\n ignore_index: Optional[int] = None,\n absent_score: float = 0.0,\n threshold: float = 0.5,\n- num_classes: Optional[int] = None,\n reduction: str = \"elementwise_mean\",\n ) -> Tensor:\n r\"\"\"\n@@ -95,6 +95,7 @@\n Args:\n preds: tensor containing predictions from model (probabilities, or labels) with shape ``[N, d1, d2, ...]``\n target: tensor containing ground truth labels with shape ``[N, d1, d2, ...]``\n+ num_classes: Specify the number of classes\n ignore_index: optional int specifying a target class to ignore. If given,\n this class index does not contribute to the returned score, regardless\n of reduction method. Has no effect if given an int that is not in the\n@@ -107,8 +108,7 @@\n assigned the `absent_score`.\n threshold:\n Threshold value for binary or multi-label probabilities.\n- num_classes:\n- Optionally specify the number of classes\n+\n reduction: a method to reduce metric score over labels.\n \n - ``'elementwise_mean'``: takes the mean (default)\n@@ -124,7 +124,7 @@\n >>> target = torch.randint(0, 2, (10, 25, 25))\n >>> pred = torch.tensor(target)\n >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]\n- >>> jaccard_index(pred, target)\n+ >>> jaccard_index(pred, target, num_classes=2)\n tensor(0.9660)\n \"\"\"\n", "issue": "Jaccard Index behaviour when num_classes is absent\nThe [jaccard index function](https://github.com/PyTorchLightning/metrics/blob/8fade87062a7b87c1e6429bbe1c4e0112b3713a5/torchmetrics/functional/classification/jaccard.py#L69) determines the number of classes automatically.\r\nThis can result in strange results if the user forgets to specify `num_classes`. For example, in IoU determination in semantic segmentation, not all images have all classes. 
So determining them automatically will almost always lead to wrong number.\r\nAdditionally, the [warning](https://github.com/PyTorchLightning/metrics/blob/8fade87062a7b87c1e6429bbe1c4e0112b3713a5/torchmetrics/utilities/data.py#L156) that shows up makes it seem like something is wrong, when the user specifies the number of classes explicitly (but this is not the case at all):\r\n\r\n```python\r\n/run/determined/pythonuserbase/lib/python3.7/site-packages/torchmetrics/utilities/prints.py:36: RuntimeWarning: You have set 11 number of classes which is different from predicted (1000) and target (10) number of classes\r\n```\r\n\r\nWould it be crazy if `num_classes` requirement is default and the automatic determination of classes is a special option that must be enabled?\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.classification.confusion_matrix import _confusion_matrix_update\nfrom torchmetrics.utilities.data import get_num_classes\nfrom torchmetrics.utilities.distributed import reduce\n\n\ndef _jaccard_from_confmat(\n confmat: Tensor,\n num_classes: int,\n ignore_index: Optional[int] = None,\n absent_score: float = 0.0,\n reduction: str = \"elementwise_mean\",\n) -> Tensor:\n \"\"\"Computes the intersection over union from confusion matrix.\n\n Args:\n confmat: Confusion matrix without normalization\n num_classes: Number of classes for a given prediction and target tensor\n ignore_index: optional int specifying a target class to ignore. 
If given, this class index does not contribute\n to the returned score, regardless of reduction method.\n absent_score: score to use for an individual class, if no instances of the class index were present in `pred`\n AND no instances of the class index were present in `target`.\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'``: no reduction will be applied\n \"\"\"\n\n # Remove the ignored class index from the scores.\n if ignore_index is not None and 0 <= ignore_index < num_classes:\n confmat[ignore_index] = 0.0\n\n intersection = torch.diag(confmat)\n union = confmat.sum(0) + confmat.sum(1) - intersection\n\n # If this class is absent in both target AND pred (union == 0), then use the absent_score for this class.\n scores = intersection.float() / union.float()\n scores[union == 0] = absent_score\n\n if ignore_index is not None and 0 <= ignore_index < num_classes:\n scores = torch.cat(\n [\n scores[:ignore_index],\n scores[ignore_index + 1 :],\n ]\n )\n\n return reduce(scores, reduction=reduction)\n\n\ndef jaccard_index(\n preds: Tensor,\n target: Tensor,\n ignore_index: Optional[int] = None,\n absent_score: float = 0.0,\n threshold: float = 0.5,\n num_classes: Optional[int] = None,\n reduction: str = \"elementwise_mean\",\n) -> Tensor:\n r\"\"\"\n Computes `Jaccard index`_\n\n .. math:: J(A,B) = \\frac{|A\\cap B|}{|A\\cup B|}\n\n Where: :math:`A` and :math:`B` are both tensors of the same size,\n containing integer class values. They may be subject to conversion from\n input data (see description below).\n\n Note that it is different from box IoU.\n\n If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument\n to convert into integer labels. This is the case for binary and multi-label probabilities.\n\n If pred has an extra dimension as in the case of multi-class scores we\n perform an argmax on ``dim=1``.\n\n Args:\n preds: tensor containing predictions from model (probabilities, or labels) with shape ``[N, d1, d2, ...]``\n target: tensor containing ground truth labels with shape ``[N, d1, d2, ...]``\n ignore_index: optional int specifying a target class to ignore. If given,\n this class index does not contribute to the returned score, regardless\n of reduction method. Has no effect if given an int that is not in the\n range [0, num_classes-1], where num_classes is either given or derived\n from pred and target. By default, no index is ignored, and all classes are used.\n absent_score: score to use for an individual class, if no instances of\n the class index were present in `pred` AND no instances of the class\n index were present in `target`. 
For example, if we have 3 classes,\n [0, 0] for `pred`, and [0, 2] for `target`, then class 1 would be\n assigned the `absent_score`.\n threshold:\n Threshold value for binary or multi-label probabilities.\n num_classes:\n Optionally specify the number of classes\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'``: no reduction will be applied\n\n Return:\n IoU score: Tensor containing single value if reduction is\n 'elementwise_mean', or number of classes if reduction is 'none'\n\n Example:\n >>> from torchmetrics.functional import jaccard_index\n >>> target = torch.randint(0, 2, (10, 25, 25))\n >>> pred = torch.tensor(target)\n >>> pred[2:5, 7:13, 9:15] = 1 - pred[2:5, 7:13, 9:15]\n >>> jaccard_index(pred, target)\n tensor(0.9660)\n \"\"\"\n\n num_classes = get_num_classes(preds=preds, target=target, num_classes=num_classes)\n confmat = _confusion_matrix_update(preds, target, num_classes, threshold)\n return _jaccard_from_confmat(confmat, num_classes, ignore_index, absent_score, reduction)\n", "path": "torchmetrics/functional/classification/jaccard.py"}]}
| 2,513 | 485 |
gh_patches_debug_24631
|
rasdani/github-patches
|
git_diff
|
pypa__pipenv-5401
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
using conda python to create pipenv adds both python_version and python_full_version to pipfile
Hello,
If I use a miniconda environment's python for creating a pipenv environment, it adds BOTH, a python_version and python_full_version (e.g. 3.9 and 3.9.13) to the pipfile, and that throws an error.
System details:
I am using Ubuntu 22.04 via WSL.
System python is: 3.10.6
Pip version is: 22.2.2
Pipenv installed via pip (outside of any environment): 2022.10.9
Conda version is: 22.9.0
All are the latest right now
Steps to reproduce the error:
(mlzoomcamp is a conda environment with python 3.9.13 (but I also used a conda env with python 3.10 and the same issue persisted))
```
(base) mona@Memoona-PC:~$ pipenv --python=/home/mona/miniconda3/envs/mlzoomcamp/bin/python
# runs fine, but when I install something:
(base) mona@Memoona-PC:~/extra$ pipenv install numpy
```
It installs numpy correctly, but then throws this error:
```
Installing numpy...
Adding numpy to Pipfile's [packages]...
✔ Installation Succeeded
Pipfile.lock not found, creating...
Traceback (most recent call last):
File "/home/mona/.local/bin/pipenv", line 8, in <module>
sys.exit(cli())
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/cli/options.py", line 57, in main
return super().main(*args, **kwargs, windows_expand_args=False)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/decorators.py", line 84, in new_func
return ctx.invoke(f, obj, *args, **kwargs)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/cli/command.py", line 236, in install
do_install(
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/core.py", line 2389, in do_install
do_init(
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/core.py", line 1303, in do_init
do_lock(
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/core.py", line 1070, in do_lock
lockfile = project._lockfile(categories=lockfile_categories)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/project.py", line 581, in _lockfile
plette.Pipfile.load(pf), categories=categories
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/pipfiles.py", line 59, in load
return cls(data)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/models/base.py", line 49, in __init__
self.validate(data)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/pipfiles.py", line 43, in validate
klass.validate(data[key])
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/models/base.py", line 81, in validate
return validate(cls, data)
File "/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/models/base.py", line 38, in validate
raise ValidationError(data, v)
pipenv.vendor.plette.models.base.ValidationError: {'python_version': '3.9', 'python_full_version': '3.9.13'}
python_full_version: 'python_version' must not be present with 'python_full_version'
python_version: 'python_full_version' must not be present with 'python_version'
```
The pipenv file at this time looks like this:
```
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"

[packages]
numpy = "*"

[dev-packages]

[requires]
python_version = "3.9"
python_full_version = "3.9.13"
```
I looked at the pipfile before the install command, and it has already added the 2 versions as soon as the environment was created.
I did find a fix, to manually remove the python_version line from pipfile.
Also, if I use the system python to create an environment, this issue doesn't happen, so this is most likely a conda issue. Do I have to manually remove the python_version each time from the pipfile?
</issue>
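For context, the failing check can be reproduced directly against the vendored plette model shown in the code below. This is an illustrative sketch (the import path and the classmethod call are inferred from the traceback), not part of the original report:

```python
# Hypothetical reproduction of the reported ValidationError (illustrative only).
from pipenv.vendor.plette.models.sections import Requires  # path assumed from the traceback

requires_section = {"python_version": "3.9", "python_full_version": "3.9.13"}
try:
    Requires.validate(requires_section)  # classmethod inherited from DataView
except Exception as exc:
    # Expected: plette's ValidationError saying the two keys must not be present together.
    print(f"{type(exc).__name__}: {exc}")
```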
<code>
[start of pipenv/vendor/plette/models/sections.py]
1 from .base import DataView, DataViewMapping, DataViewSequence
2 from .hashes import Hash
3 from .packages import Package
4 from .scripts import Script
5 from .sources import Source
6
7
8 class PackageCollection(DataViewMapping):
9 item_class = Package
10
11
12 class ScriptCollection(DataViewMapping):
13 item_class = Script
14
15
16 class SourceCollection(DataViewSequence):
17 item_class = Source
18
19
20 class Requires(DataView):
21 """Representation of the `[requires]` section in a Pipfile.
22 """
23 __SCHEMA__ = {
24 "python_version": {
25 "type": "string",
26 "excludes": ["python_full_version"],
27 },
28 "python_full_version": {
29 "type": "string",
30 "excludes": ["python_version"],
31 },
32 }
33
34 @property
35 def python_version(self):
36 try:
37 return self._data["python_version"]
38 except KeyError:
39 raise AttributeError("python_version")
40
41 @property
42 def python_full_version(self):
43 try:
44 return self._data["python_full_version"]
45 except KeyError:
46 raise AttributeError("python_full_version")
47
48
49 META_SECTIONS = {
50 "hash": Hash,
51 "requires": Requires,
52 "sources": SourceCollection,
53 }
54
55
56 class Meta(DataView):
57 """Representation of the `_meta` section in a Pipfile.lock.
58 """
59 __SCHEMA__ = {
60 "hash": {"type": "dict", "required": True},
61 "pipfile-spec": {"type": "integer", "required": True, "min": 0},
62 "requires": {"type": "dict", "required": True},
63 "sources": {"type": "list", "required": True},
64 }
65
66 @classmethod
67 def validate(cls, data):
68 super(Meta, cls).validate(data)
69 for key, klass in META_SECTIONS.items():
70 klass.validate(data[key])
71
72 def __getitem__(self, key):
73 value = super(Meta, self).__getitem__(key)
74 try:
75 return META_SECTIONS[key](value)
76 except KeyError:
77 return value
78
79 def __setitem__(self, key, value):
80 if isinstance(value, DataView):
81 self._data[key] = value._data
82 else:
83 self._data[key] = value
84
85 @property
86 def hash_(self):
87 return self["hash"]
88
89 @hash_.setter
90 def hash_(self, value):
91 self["hash"] = value
92
93 @property
94 def hash(self):
95 return self["hash"]
96
97 @hash.setter
98 def hash(self, value):
99 self["hash"] = value
100
101 @property
102 def pipfile_spec(self):
103 return self["pipfile-spec"]
104
105 @pipfile_spec.setter
106 def pipfile_spec(self, value):
107 self["pipfile-spec"] = value
108
109 @property
110 def requires(self):
111 return self["requires"]
112
113 @requires.setter
114 def requires(self, value):
115 self["requires"] = value
116
117 @property
118 def sources(self):
119 return self["sources"]
120
121 @sources.setter
122 def sources(self, value):
123 self["sources"] = value
124
[end of pipenv/vendor/plette/models/sections.py]
[start of pipenv/vendor/plette/__init__.py]
1 __all__ = [
2 "__version__",
3 "Lockfile", "Pipfile",
4 ]
5
6 __version__ = '0.4.1'
7
8 from .lockfiles import Lockfile
9 from .pipfiles import Pipfile
10
[end of pipenv/vendor/plette/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pipenv/vendor/plette/__init__.py b/pipenv/vendor/plette/__init__.py
--- a/pipenv/vendor/plette/__init__.py
+++ b/pipenv/vendor/plette/__init__.py
@@ -3,7 +3,7 @@
"Lockfile", "Pipfile",
]
-__version__ = '0.4.1'
+__version__ = '0.4.2'
from .lockfiles import Lockfile
from .pipfiles import Pipfile
diff --git a/pipenv/vendor/plette/models/sections.py b/pipenv/vendor/plette/models/sections.py
--- a/pipenv/vendor/plette/models/sections.py
+++ b/pipenv/vendor/plette/models/sections.py
@@ -18,16 +18,14 @@
class Requires(DataView):
- """Representation of the `[requires]` section in a Pipfile.
- """
+ """Representation of the `[requires]` section in a Pipfile."""
+
__SCHEMA__ = {
"python_version": {
"type": "string",
- "excludes": ["python_full_version"],
},
"python_full_version": {
"type": "string",
- "excludes": ["python_version"],
},
}
@@ -54,8 +52,8 @@
class Meta(DataView):
- """Representation of the `_meta` section in a Pipfile.lock.
- """
+ """Representation of the `_meta` section in a Pipfile.lock."""
+
__SCHEMA__ = {
"hash": {"type": "dict", "required": True},
"pipfile-spec": {"type": "integer", "required": True, "min": 0},
|
{"golden_diff": "diff --git a/pipenv/vendor/plette/__init__.py b/pipenv/vendor/plette/__init__.py\n--- a/pipenv/vendor/plette/__init__.py\n+++ b/pipenv/vendor/plette/__init__.py\n@@ -3,7 +3,7 @@\n \"Lockfile\", \"Pipfile\",\n ]\n \n-__version__ = '0.4.1'\n+__version__ = '0.4.2'\n \n from .lockfiles import Lockfile\n from .pipfiles import Pipfile\ndiff --git a/pipenv/vendor/plette/models/sections.py b/pipenv/vendor/plette/models/sections.py\n--- a/pipenv/vendor/plette/models/sections.py\n+++ b/pipenv/vendor/plette/models/sections.py\n@@ -18,16 +18,14 @@\n \n \n class Requires(DataView):\n- \"\"\"Representation of the `[requires]` section in a Pipfile.\n- \"\"\"\n+ \"\"\"Representation of the `[requires]` section in a Pipfile.\"\"\"\n+\n __SCHEMA__ = {\n \"python_version\": {\n \"type\": \"string\",\n- \"excludes\": [\"python_full_version\"],\n },\n \"python_full_version\": {\n \"type\": \"string\",\n- \"excludes\": [\"python_version\"],\n },\n }\n \n@@ -54,8 +52,8 @@\n \n \n class Meta(DataView):\n- \"\"\"Representation of the `_meta` section in a Pipfile.lock.\n- \"\"\"\n+ \"\"\"Representation of the `_meta` section in a Pipfile.lock.\"\"\"\n+\n __SCHEMA__ = {\n \"hash\": {\"type\": \"dict\", \"required\": True},\n \"pipfile-spec\": {\"type\": \"integer\", \"required\": True, \"min\": 0},\n", "issue": "using conda python to create pipenv adds both python_version and python_full_version to pipfile\nHello, \r\n\r\nIf I use a miniconda environment's python for creating a pipenv environment, it adds BOTH, a python_version and python_full_version (e.g. 3.9 and 3.9.13) to the pipfile, and that throws an error.\r\n\r\nSystem details:\r\n\r\nI am using Ubuntu 22.04 via WSL. \r\nSystem python is: 3.10.6\r\nPip version is: 22.2.2\r\nPipenv installed via pip (outside of any environment): 2022.10.9\r\nConda version is: 22.9.0\r\nAll are the latest right now\r\n\r\n\r\nSteps to reproduce the error:\r\n\r\n(mlzoomcamp is a conda environment with python 3.9.13 (but I also used a conda env with python 3.10 and the same issue persisted)) \r\n\r\n```\r\n(base) mona@Memoona-PC:~$ pipenv --python=/home/mona/miniconda3/envs/mlzoomcamp/bin/python\r\n# runs fine, but when I install something:\r\n(base) mona@Memoona-PC:~/extra$ pipenv install numpy\r\n```\r\n\r\nIt installs numpy correctly, but then throws this error: \r\n\r\n```\r\nInstalling numpy...\r\nAdding numpy to Pipfile's [packages]...\r\n\u2714 Installation Succeeded\r\nPipfile.lock not found, creating...\r\nTraceback (most recent call last):\r\n File \"/home/mona/.local/bin/pipenv\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py\", line 1128, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/cli/options.py\", line 57, in main\r\n return super().main(*args, **kwargs, windows_expand_args=False)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py\", line 1053, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py\", line 1659, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py\", line 1395, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File 
\"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/decorators.py\", line 84, in new_func\r\n return ctx.invoke(f, obj, *args, **kwargs)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/cli/command.py\", line 236, in install\r\n do_install(\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/core.py\", line 2389, in do_install\r\n do_init(\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/core.py\", line 1303, in do_init\r\n do_lock(\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/core.py\", line 1070, in do_lock\r\n lockfile = project._lockfile(categories=lockfile_categories)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/project.py\", line 581, in _lockfile\r\n plette.Pipfile.load(pf), categories=categories\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/pipfiles.py\", line 59, in load\r\n return cls(data)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/models/base.py\", line 49, in __init__\r\n self.validate(data)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/pipfiles.py\", line 43, in validate\r\n klass.validate(data[key])\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/models/base.py\", line 81, in validate\r\n return validate(cls, data)\r\n File \"/home/mona/.local/lib/python3.10/site-packages/pipenv/vendor/plette/models/base.py\", line 38, in validate\r\n raise ValidationError(data, v)\r\npipenv.vendor.plette.models.base.ValidationError: {'python_version': '3.9', 'python_full_version': '3.9.13'}\r\npython_full_version: 'python_version' must not be present with 'python_full_version'\r\npython_version: 'python_full_version' must not be present with 'python_version'\r\n```\r\n\r\nThe pipenv file at this time looks like this:\r\n\r\n```\r\n[[source]]\r\nurl = \"https://pypi.org/simple\"\r\nverify_ssl = true\r\nname = \"pypi\"\r\n\r\n[packages]\r\nnumpy = \"*\"\r\n\r\n[dev-packages]\r\n\r\n[requires]\r\npython_version = \"3.9\"\r\npython_full_version = \"3.9.13\"\r\n```\r\n\r\nI looked at the pipfile before the install command, and it has already added the 2 versions as soon as the environment was created.\r\n\r\nI did find a fix, to manually remove the python_version line from pipfile. \r\n\r\nAlso, if I use the system python to create an environment, this issue doesn't happen, so this is most likely a conda issue. Do I have to manually remove the python_version each time from the pipfile? 
\r\n\r\n\n", "before_files": [{"content": "from .base import DataView, DataViewMapping, DataViewSequence\nfrom .hashes import Hash\nfrom .packages import Package\nfrom .scripts import Script\nfrom .sources import Source\n\n\nclass PackageCollection(DataViewMapping):\n item_class = Package\n\n\nclass ScriptCollection(DataViewMapping):\n item_class = Script\n\n\nclass SourceCollection(DataViewSequence):\n item_class = Source\n\n\nclass Requires(DataView):\n \"\"\"Representation of the `[requires]` section in a Pipfile.\n \"\"\"\n __SCHEMA__ = {\n \"python_version\": {\n \"type\": \"string\",\n \"excludes\": [\"python_full_version\"],\n },\n \"python_full_version\": {\n \"type\": \"string\",\n \"excludes\": [\"python_version\"],\n },\n }\n\n @property\n def python_version(self):\n try:\n return self._data[\"python_version\"]\n except KeyError:\n raise AttributeError(\"python_version\")\n\n @property\n def python_full_version(self):\n try:\n return self._data[\"python_full_version\"]\n except KeyError:\n raise AttributeError(\"python_full_version\")\n\n\nMETA_SECTIONS = {\n \"hash\": Hash,\n \"requires\": Requires,\n \"sources\": SourceCollection,\n}\n\n\nclass Meta(DataView):\n \"\"\"Representation of the `_meta` section in a Pipfile.lock.\n \"\"\"\n __SCHEMA__ = {\n \"hash\": {\"type\": \"dict\", \"required\": True},\n \"pipfile-spec\": {\"type\": \"integer\", \"required\": True, \"min\": 0},\n \"requires\": {\"type\": \"dict\", \"required\": True},\n \"sources\": {\"type\": \"list\", \"required\": True},\n }\n\n @classmethod\n def validate(cls, data):\n super(Meta, cls).validate(data)\n for key, klass in META_SECTIONS.items():\n klass.validate(data[key])\n\n def __getitem__(self, key):\n value = super(Meta, self).__getitem__(key)\n try:\n return META_SECTIONS[key](value)\n except KeyError:\n return value\n\n def __setitem__(self, key, value):\n if isinstance(value, DataView):\n self._data[key] = value._data\n else:\n self._data[key] = value\n\n @property\n def hash_(self):\n return self[\"hash\"]\n\n @hash_.setter\n def hash_(self, value):\n self[\"hash\"] = value\n\n @property\n def hash(self):\n return self[\"hash\"]\n\n @hash.setter\n def hash(self, value):\n self[\"hash\"] = value\n\n @property\n def pipfile_spec(self):\n return self[\"pipfile-spec\"]\n\n @pipfile_spec.setter\n def pipfile_spec(self, value):\n self[\"pipfile-spec\"] = value\n\n @property\n def requires(self):\n return self[\"requires\"]\n\n @requires.setter\n def requires(self, value):\n self[\"requires\"] = value\n\n @property\n def sources(self):\n return self[\"sources\"]\n\n @sources.setter\n def sources(self, value):\n self[\"sources\"] = value\n", "path": "pipenv/vendor/plette/models/sections.py"}, {"content": "__all__ = [\n \"__version__\",\n \"Lockfile\", \"Pipfile\",\n]\n\n__version__ = '0.4.1'\n\nfrom .lockfiles import Lockfile\nfrom .pipfiles import Pipfile\n", "path": "pipenv/vendor/plette/__init__.py"}]}
| 2,968 | 392 |
gh_patches_debug_7461
|
rasdani/github-patches
|
git_diff
|
conda__conda-5357
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
latest 4.3.x (4.3.18-37-g79f86d4c) not picking up conda-build subcommands
From conda-build's test suite:
```
________________________________ test_skeleton_pypi ________________________________
Traceback (most recent call last):
File "/home/dev/code/conda-build/tests/test_published_examples.py", line 15, in test_skeleton_pypi
check_call_env(cmd.split())
File "/home/dev/code/conda-build/conda_build/utils.py", line 670, in check_call_env
return _func_defaulting_env_to_os_environ(subprocess.check_call, *popenargs, **kwargs)
File "/home/dev/code/conda-build/conda_build/utils.py", line 666, in _func_defaulting_env_to_os_environ
return func(_args, **kwargs)
File "/opt/miniconda/lib/python2.7/subprocess.py", line 541, in check_call
raise CalledProcessError(retcode, cmd)
CalledProcessError: Command '['conda', 'skeleton', 'pypi', 'pyinstrument']' returned non-zero exit status 2
------------------------------- Captured stderr call -------------------------------
usage: conda [-h] [-V] command ...
conda: error: argument command: invalid choice: u'skeleton' (choose from u'info', u'help', u'list', u'search', u'create', u'install', u'update', u'upgrade', u'remove', u'uninstall', u'config', u'clean', u'package')
```
This seems to happen only with python 2.7, not 3.6: https://travis-ci.org/conda/conda-build/builds/232848688
</issue>
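To make the failure mode concrete: on Python 2 argparse reprs the unknown subcommand as `u'skeleton'`, so the strict pattern used in `conda_argparse.py` never matches and the external `conda-skeleton` executable is never dispatched. A small sketch (the message strings are approximations of the argparse output quoted above):

```python
import re

py3_message = "invalid choice: 'skeleton' (choose from 'info', 'help', ...)"     # approximate
py2_message = "invalid choice: u'skeleton' (choose from u'info', u'help', ...)"  # approximate

strict = re.compile(r"invalid choice: '([\w\-]+)'")      # pattern currently in the code
tolerant = re.compile(r"invalid choice: u?'([\w\-]+)'")  # also accepts the u prefix

print(strict.match(py3_message).group(1))    # 'skeleton'
print(strict.match(py2_message))             # None -> subcommand lookup never happens
print(tolerant.match(py2_message).group(1))  # 'skeleton'
```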
<code>
[start of conda/cli/conda_argparse.py]
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 from __future__ import print_function, division, absolute_import, unicode_literals
8
9 import argparse
10 import os
11 import subprocess
12 import sys
13
14 from .common import add_parser_help
15 from .find_commands import find_commands, find_executable
16 from ..exceptions import CommandNotFoundError
17
18
19 _ARGCOMPLETE_DEBUG = False
20 def debug_argcomplete(msg):
21 # To debug this, replace ttys001 with the fd of the terminal you are using
22 # (use the `tty` command to find this), and set _ARGCOMPLETE_DEBUG above
23 # to True. You can also `export _ARC_DEBUG=1` in the shell you are using
24 # to print debug messages from argcomplete.
25 if _ARGCOMPLETE_DEBUG:
26 f = open('/dev/ttys001', 'w')
27 f.write("\n%s\n" % msg)
28 f.flush()
29
30
31 try:
32 import argcomplete
33 argcomplete.CompletionFinder
34 except (ImportError, AttributeError):
35 # On Python 3.3, argcomplete can be an empty namespace package when
36 # we are in the conda-recipes directory.
37 argcomplete = None
38
39 if argcomplete:
40 class CondaSubprocessCompletionFinder(argcomplete.CompletionFinder):
41 def __call__(self, argument_parser, **kwargs):
42 def call_super():
43 parent = super(CondaSubprocessCompletionFinder, self)
44 return parent.__call__(argument_parser, **kwargs)
45
46 debug_argcomplete("Working")
47
48 if argument_parser.prog != 'conda':
49 debug_argcomplete("Argument parser is not conda")
50 return call_super()
51
52 environ = os.environ.copy()
53 if 'COMP_LINE' not in environ:
54 debug_argcomplete("COMP_LINE not in environ")
55 return call_super()
56
57 subcommands = find_commands()
58 for subcommand in subcommands:
59 if 'conda %s' % subcommand in environ['COMP_LINE']:
60 environ['COMP_LINE'] = environ['COMP_LINE'].replace('conda %s' % subcommand,
61 'conda-%s' % subcommand)
62 debug_argcomplete("Using subprocess")
63 debug_argcomplete(sys.argv)
64 import pprint
65 debug_argcomplete(pprint.pformat(environ))
66 args = [find_executable('conda-%s' % subcommand)]
67 debug_argcomplete(args)
68 p = subprocess.Popen(args, env=environ, close_fds=False)
69 p.communicate()
70 sys.exit()
71 else:
72 debug_argcomplete("Not using subprocess")
73 debug_argcomplete(sys.argv)
74 debug_argcomplete(argument_parser)
75 return call_super()
76
77 class ArgumentParser(argparse.ArgumentParser):
78 def __init__(self, *args, **kwargs):
79 if not kwargs.get('formatter_class'):
80 kwargs['formatter_class'] = argparse.RawDescriptionHelpFormatter
81 if 'add_help' not in kwargs:
82 add_custom_help = True
83 kwargs['add_help'] = False
84 else:
85 add_custom_help = False
86 super(ArgumentParser, self).__init__(*args, **kwargs)
87
88 if add_custom_help:
89 add_parser_help(self)
90
91 if self.description:
92 self.description += "\n\nOptions:\n"
93
94 def _get_action_from_name(self, name):
95 """Given a name, get the Action instance registered with this parser.
96 If only it were made available in the ArgumentError object. It is
97 passed as it's first arg...
98 """
99 container = self._actions
100 if name is None:
101 return None
102 for action in container:
103 if '/'.join(action.option_strings) == name:
104 return action
105 elif action.metavar == name:
106 return action
107 elif action.dest == name:
108 return action
109
110 def error(self, message):
111 import re
112 import subprocess
113 from .find_commands import find_executable
114
115 exc = sys.exc_info()[1]
116 if exc:
117 # this is incredibly lame, but argparse stupidly does not expose
118 # reasonable hooks for customizing error handling
119 if hasattr(exc, 'argument_name'):
120 argument = self._get_action_from_name(exc.argument_name)
121 else:
122 argument = None
123 if argument and argument.dest == "cmd":
124 m = re.compile(r"invalid choice: '([\w\-]+)'").match(exc.message)
125 if m:
126 cmd = m.group(1)
127 executable = find_executable('conda-' + cmd)
128 if not executable:
129 raise CommandNotFoundError(cmd)
130
131 args = [find_executable('conda-' + cmd)]
132 args.extend(sys.argv[2:])
133 p = subprocess.Popen(args)
134 try:
135 p.communicate()
136 except KeyboardInterrupt:
137 p.wait()
138 finally:
139 sys.exit(p.returncode)
140
141 super(ArgumentParser, self).error(message)
142
143 def print_help(self):
144 super(ArgumentParser, self).print_help()
145
146 if self.prog == 'conda' and sys.argv[1:] in ([], ['help'], ['-h'], ['--help']):
147 print("""
148 other commands, such as "conda build", are avaialble when additional conda
149 packages (e.g. conda-build) are installed
150 """)
151
152 def parse_args(self, *args, **kwargs):
153 if argcomplete:
154 CondaSubprocessCompletionFinder()(self)
155
156 return super(ArgumentParser, self).parse_args(*args, **kwargs)
157
[end of conda/cli/conda_argparse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/cli/conda_argparse.py b/conda/cli/conda_argparse.py
--- a/conda/cli/conda_argparse.py
+++ b/conda/cli/conda_argparse.py
@@ -121,7 +121,7 @@
else:
argument = None
if argument and argument.dest == "cmd":
- m = re.compile(r"invalid choice: '([\w\-]+)'").match(exc.message)
+ m = re.compile(r"invalid choice: u?'([\w\-]+)'").match(exc.message)
if m:
cmd = m.group(1)
executable = find_executable('conda-' + cmd)
|
{"golden_diff": "diff --git a/conda/cli/conda_argparse.py b/conda/cli/conda_argparse.py\n--- a/conda/cli/conda_argparse.py\n+++ b/conda/cli/conda_argparse.py\n@@ -121,7 +121,7 @@\n else:\n argument = None\n if argument and argument.dest == \"cmd\":\n- m = re.compile(r\"invalid choice: '([\\w\\-]+)'\").match(exc.message)\n+ m = re.compile(r\"invalid choice: u?'([\\w\\-]+)'\").match(exc.message)\n if m:\n cmd = m.group(1)\n executable = find_executable('conda-' + cmd)\n", "issue": "latest 4.3.x (4.3.18-37-g79f86d4c) not picking up conda-build subcommands\nFrom conda-build's test suite:\r\n\r\n```\r\n________________________________ test_skeleton_pypi ________________________________\r\nTraceback (most recent call last):\r\n File \"/home/dev/code/conda-build/tests/test_published_examples.py\", line 15, in test_skeleton_pypi\r\n check_call_env(cmd.split())\r\n File \"/home/dev/code/conda-build/conda_build/utils.py\", line 670, in check_call_env\r\n return _func_defaulting_env_to_os_environ(subprocess.check_call, *popenargs, **kwargs)\r\n File \"/home/dev/code/conda-build/conda_build/utils.py\", line 666, in _func_defaulting_env_to_os_environ\r\n return func(_args, **kwargs)\r\n File \"/opt/miniconda/lib/python2.7/subprocess.py\", line 541, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\nCalledProcessError: Command '['conda', 'skeleton', 'pypi', 'pyinstrument']' returned non-zero exit status 2\r\n------------------------------- Captured stderr call -------------------------------\r\nusage: conda [-h] [-V] command ...\r\nconda: error: argument command: invalid choice: u'skeleton' (choose from u'info', u'help', u'list', u'search', u'create', u'install', u'update', u'upgrade', u'remove', u'uninstall', u'config', u'clean', u'package')\r\n```\r\n\r\nThis seems to happen only with python 2.7, not 3.6: https://travis-ci.org/conda/conda-build/builds/232848688\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\nfrom __future__ import print_function, division, absolute_import, unicode_literals\n\nimport argparse\nimport os\nimport subprocess\nimport sys\n\nfrom .common import add_parser_help\nfrom .find_commands import find_commands, find_executable\nfrom ..exceptions import CommandNotFoundError\n\n\n_ARGCOMPLETE_DEBUG = False\ndef debug_argcomplete(msg):\n # To debug this, replace ttys001 with the fd of the terminal you are using\n # (use the `tty` command to find this), and set _ARGCOMPLETE_DEBUG above\n # to True. 
You can also `export _ARC_DEBUG=1` in the shell you are using\n # to print debug messages from argcomplete.\n if _ARGCOMPLETE_DEBUG:\n f = open('/dev/ttys001', 'w')\n f.write(\"\\n%s\\n\" % msg)\n f.flush()\n\n\ntry:\n import argcomplete\n argcomplete.CompletionFinder\nexcept (ImportError, AttributeError):\n # On Python 3.3, argcomplete can be an empty namespace package when\n # we are in the conda-recipes directory.\n argcomplete = None\n\nif argcomplete:\n class CondaSubprocessCompletionFinder(argcomplete.CompletionFinder):\n def __call__(self, argument_parser, **kwargs):\n def call_super():\n parent = super(CondaSubprocessCompletionFinder, self)\n return parent.__call__(argument_parser, **kwargs)\n\n debug_argcomplete(\"Working\")\n\n if argument_parser.prog != 'conda':\n debug_argcomplete(\"Argument parser is not conda\")\n return call_super()\n\n environ = os.environ.copy()\n if 'COMP_LINE' not in environ:\n debug_argcomplete(\"COMP_LINE not in environ\")\n return call_super()\n\n subcommands = find_commands()\n for subcommand in subcommands:\n if 'conda %s' % subcommand in environ['COMP_LINE']:\n environ['COMP_LINE'] = environ['COMP_LINE'].replace('conda %s' % subcommand,\n 'conda-%s' % subcommand)\n debug_argcomplete(\"Using subprocess\")\n debug_argcomplete(sys.argv)\n import pprint\n debug_argcomplete(pprint.pformat(environ))\n args = [find_executable('conda-%s' % subcommand)]\n debug_argcomplete(args)\n p = subprocess.Popen(args, env=environ, close_fds=False)\n p.communicate()\n sys.exit()\n else:\n debug_argcomplete(\"Not using subprocess\")\n debug_argcomplete(sys.argv)\n debug_argcomplete(argument_parser)\n return call_super()\n\nclass ArgumentParser(argparse.ArgumentParser):\n def __init__(self, *args, **kwargs):\n if not kwargs.get('formatter_class'):\n kwargs['formatter_class'] = argparse.RawDescriptionHelpFormatter\n if 'add_help' not in kwargs:\n add_custom_help = True\n kwargs['add_help'] = False\n else:\n add_custom_help = False\n super(ArgumentParser, self).__init__(*args, **kwargs)\n\n if add_custom_help:\n add_parser_help(self)\n\n if self.description:\n self.description += \"\\n\\nOptions:\\n\"\n\n def _get_action_from_name(self, name):\n \"\"\"Given a name, get the Action instance registered with this parser.\n If only it were made available in the ArgumentError object. 
It is\n passed as it's first arg...\n \"\"\"\n container = self._actions\n if name is None:\n return None\n for action in container:\n if '/'.join(action.option_strings) == name:\n return action\n elif action.metavar == name:\n return action\n elif action.dest == name:\n return action\n\n def error(self, message):\n import re\n import subprocess\n from .find_commands import find_executable\n\n exc = sys.exc_info()[1]\n if exc:\n # this is incredibly lame, but argparse stupidly does not expose\n # reasonable hooks for customizing error handling\n if hasattr(exc, 'argument_name'):\n argument = self._get_action_from_name(exc.argument_name)\n else:\n argument = None\n if argument and argument.dest == \"cmd\":\n m = re.compile(r\"invalid choice: '([\\w\\-]+)'\").match(exc.message)\n if m:\n cmd = m.group(1)\n executable = find_executable('conda-' + cmd)\n if not executable:\n raise CommandNotFoundError(cmd)\n\n args = [find_executable('conda-' + cmd)]\n args.extend(sys.argv[2:])\n p = subprocess.Popen(args)\n try:\n p.communicate()\n except KeyboardInterrupt:\n p.wait()\n finally:\n sys.exit(p.returncode)\n\n super(ArgumentParser, self).error(message)\n\n def print_help(self):\n super(ArgumentParser, self).print_help()\n\n if self.prog == 'conda' and sys.argv[1:] in ([], ['help'], ['-h'], ['--help']):\n print(\"\"\"\nother commands, such as \"conda build\", are avaialble when additional conda\npackages (e.g. conda-build) are installed\n\"\"\")\n\n def parse_args(self, *args, **kwargs):\n if argcomplete:\n CondaSubprocessCompletionFinder()(self)\n\n return super(ArgumentParser, self).parse_args(*args, **kwargs)\n", "path": "conda/cli/conda_argparse.py"}]}
| 2,501 | 145 |
gh_patches_debug_18833
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1548
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG][HOTFIX] Add deprecation warning for condition parameter names
In 0.7 the drift checks were updated to add Cramer's V.
Thus the previous condition parameter names ("max_psi...", "max_earthmovers...") were renamed.
Previous parameters should still be re-added with deprecation warnings, for all relevant checks and conditions.
</issue>
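As a rough sketch of the usual pattern for re-adding a renamed keyword with a warning (the function and parameter names below are illustrative, not the actual deepchecks condition signatures):

```python
import warnings

def add_drift_condition(max_allowed_drift_score=0.2,
                        max_allowed_psi_score=None,
                        max_allowed_earth_movers_score=None):
    """Accept the pre-0.7 keyword names, but emit a DeprecationWarning for them."""
    if max_allowed_psi_score is not None:
        warnings.warn("max_allowed_psi_score is deprecated; use max_allowed_drift_score",
                      DeprecationWarning)
        max_allowed_drift_score = max_allowed_psi_score
    if max_allowed_earth_movers_score is not None:
        warnings.warn("max_allowed_earth_movers_score is deprecated; use max_allowed_drift_score",
                      DeprecationWarning)
        max_allowed_drift_score = max_allowed_earth_movers_score
    return max_allowed_drift_score
```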
<code>
[start of deepchecks/vision/deprecation_warnings.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages."""
12 import warnings
13
14 # Added in version 0.6.2, deprecates max_num_categories in all drift checks
15 warnings.filterwarnings(
16 action='always',
17 message=r'.*max_num_categories.*',
18 category=DeprecationWarning,
19 module=r'deepchecks.*'
20 )
21
22 # Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks
23 # Should be removed in 0.8
24 warnings.filterwarnings(
25 action='once',
26 message=r'deepchecks.vision.checks.performance is deprecated.*',
27 category=DeprecationWarning,
28 module=r'deepchecks.*'
29 )
30
31 warnings.filterwarnings(
32 action='once',
33 message=r'deepchecks.vision.checks.methodology is deprecated.*',
34 category=DeprecationWarning,
35 module=r'deepchecks.tabular.checks.methodology.*'
36 )
37
38 warnings.filterwarnings(
39 action='once',
40 message=r'deepchecks.vision.checks.distribution is deprecated.*',
41 category=DeprecationWarning,
42 module=r'deepchecks.*'
43 )
44
45 warnings.filterwarnings(
46 action='always',
47 message=r'the integrity_validation suite is deprecated.*',
48 category=DeprecationWarning,
49 module=r'deepchecks.*'
50 )
51
[end of deepchecks/vision/deprecation_warnings.py]
[start of deepchecks/tabular/deprecation_warnings.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages."""
12 import warnings
13
14 # Added in version 0.6.2, deprecates max_num_categories in all drift checks
15 warnings.filterwarnings(
16 action='always',
17 message=r'.*max_num_categories.*',
18 category=DeprecationWarning,
19 module=r'deepchecks.*'
20 )
21
22 # Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks
23 # Should be removed in 0.8
24 warnings.filterwarnings(
25 action='once',
26 message=r'deepchecks\.tabular\.checks\.performance is deprecated.*',
27 category=DeprecationWarning,
28 module=r'deepchecks.*'
29 )
30
31 warnings.filterwarnings(
32 action='once',
33 message=r'deepchecks.tabular.checks.overview is deprecated.*',
34 category=DeprecationWarning,
35 module=r'deepchecks.*'
36 )
37
38 warnings.filterwarnings(
39 action='once',
40 message=r'deepchecks.tabular.checks.methodology is deprecated.*',
41 category=DeprecationWarning,
42 module=r'deepchecks.tabular.checks.methodology.*'
43 )
44
45 warnings.filterwarnings(
46 action='once',
47 message=r'deepchecks.tabular.checks.distribution is deprecated.*',
48 category=DeprecationWarning,
49 module=r'deepchecks.*'
50 )
51
52 warnings.filterwarnings(
53 action='always',
54 message=r'the single_dataset_integrity suite is deprecated.*',
55 category=DeprecationWarning,
56 module=r'deepchecks.*'
57 )
58
59 warnings.filterwarnings(
60 action='always',
61 message=r'the train_test_leakage suite is deprecated.*',
62 category=DeprecationWarning,
63 module=r'deepchecks.*'
64 )
65
[end of deepchecks/tabular/deprecation_warnings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/tabular/deprecation_warnings.py b/deepchecks/tabular/deprecation_warnings.py
--- a/deepchecks/tabular/deprecation_warnings.py
+++ b/deepchecks/tabular/deprecation_warnings.py
@@ -62,3 +62,20 @@
category=DeprecationWarning,
module=r'deepchecks.*'
)
+
+# Added in 0.7 Warning filters for drift conditions
+# Should be removed in 0.8
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_psi_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_earth_movers_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
diff --git a/deepchecks/vision/deprecation_warnings.py b/deepchecks/vision/deprecation_warnings.py
--- a/deepchecks/vision/deprecation_warnings.py
+++ b/deepchecks/vision/deprecation_warnings.py
@@ -48,3 +48,20 @@
category=DeprecationWarning,
module=r'deepchecks.*'
)
+
+# Added in 0.7 Warning filters for drift conditions
+# Should be removed in 0.8
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_psi_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
+
+warnings.filterwarnings(
+ action='once',
+ message=r'.*max_allowed_earth_movers_score is deprecated.*',
+ category=DeprecationWarning,
+ module=r'deepchecks.*'
+)
|
{"golden_diff": "diff --git a/deepchecks/tabular/deprecation_warnings.py b/deepchecks/tabular/deprecation_warnings.py\n--- a/deepchecks/tabular/deprecation_warnings.py\n+++ b/deepchecks/tabular/deprecation_warnings.py\n@@ -62,3 +62,20 @@\n category=DeprecationWarning,\n module=r'deepchecks.*'\n )\n+\n+# Added in 0.7 Warning filters for drift conditions\n+# Should be removed in 0.8\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_psi_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_earth_movers_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\ndiff --git a/deepchecks/vision/deprecation_warnings.py b/deepchecks/vision/deprecation_warnings.py\n--- a/deepchecks/vision/deprecation_warnings.py\n+++ b/deepchecks/vision/deprecation_warnings.py\n@@ -48,3 +48,20 @@\n category=DeprecationWarning,\n module=r'deepchecks.*'\n )\n+\n+# Added in 0.7 Warning filters for drift conditions\n+# Should be removed in 0.8\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_psi_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\n+\n+warnings.filterwarnings(\n+ action='once',\n+ message=r'.*max_allowed_earth_movers_score is deprecated.*',\n+ category=DeprecationWarning,\n+ module=r'deepchecks.*'\n+)\n", "issue": "[BUG][HOTFIX] Add deprecation warning for condition parameter names\nIn 0.7 the drift checks were updated to add Cramer's V.\r\nThus the previous parameter condition names (\"max_psi... \"max_earthmovers...\") were renamed.\r\nPrevious parameters should still be re-added with deprecation warnings, for all relevant checks and conditions.\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages.\"\"\"\nimport warnings\n\n# Added in version 0.6.2, deprecates max_num_categories in all drift checks\nwarnings.filterwarnings(\n action='always',\n message=r'.*max_num_categories.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\n# Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks\n# Should be removed in 0.8\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.vision.checks.performance is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.vision.checks.methodology is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.tabular.checks.methodology.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.vision.checks.distribution is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='always',\n message=r'the integrity_validation suite is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n", "path": "deepchecks/vision/deprecation_warnings.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"This file changes default 'ignore' action of DeprecationWarnings for specific deprecation messages.\"\"\"\nimport warnings\n\n# Added in version 0.6.2, deprecates max_num_categories in all drift checks\nwarnings.filterwarnings(\n action='always',\n message=r'.*max_num_categories.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\n# Added in 0.7 Warning filters for deprecated functions in deepchecks.tabular.checks\n# Should be removed in 0.8\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks\\.tabular\\.checks\\.performance is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.tabular.checks.overview is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.tabular.checks.methodology is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.tabular.checks.methodology.*'\n)\n\nwarnings.filterwarnings(\n action='once',\n message=r'deepchecks.tabular.checks.distribution is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='always',\n message=r'the single_dataset_integrity suite is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n\nwarnings.filterwarnings(\n action='always',\n message=r'the train_test_leakage suite is deprecated.*',\n category=DeprecationWarning,\n module=r'deepchecks.*'\n)\n", "path": "deepchecks/tabular/deprecation_warnings.py"}]}
| 1,729 | 402 |
gh_patches_debug_41508
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5577
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scrapy parse doesn't support async callbacks
In master when running `scrapy parse` for a spider with `async def parse` the page is downloaded but then the spider hangs. In #4978 it instead raises `TypeError: 'async_generator' object is not iterable`. Both problems happen because the parse command calls `iterate_spider_output` and doesn't expect a Deferred or an async iterator.
</issue>
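For reference, a minimal spider whose callback triggers the described behaviour might look like this (spider name and URL are placeholders):

```python
import scrapy

class AsyncCallbackSpider(scrapy.Spider):
    name = "async-callback-demo"
    start_urls = ["https://example.com"]  # placeholder

    async def parse(self, response):
        # `scrapy parse` routes this through Command.run_callback(), which hands the
        # result to iterate_spider_output() and iterates it synchronously; an async
        # generator is not a plain iterable, hence the hang / TypeError noted above.
        yield {"url": response.url}
```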
<code>
[start of scrapy/commands/parse.py]
1 import json
2 import logging
3 from typing import Dict
4
5 from itemadapter import is_item, ItemAdapter
6 from w3lib.url import is_url
7
8 from scrapy.commands import BaseRunSpiderCommand
9 from scrapy.http import Request
10 from scrapy.utils import display
11 from scrapy.utils.spider import iterate_spider_output, spidercls_for_request
12 from scrapy.exceptions import UsageError
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class Command(BaseRunSpiderCommand):
19 requires_project = True
20
21 spider = None
22 items: Dict[int, list] = {}
23 requests: Dict[int, list] = {}
24
25 first_response = None
26
27 def syntax(self):
28 return "[options] <url>"
29
30 def short_desc(self):
31 return "Parse URL (using its spider) and print the results"
32
33 def add_options(self, parser):
34 BaseRunSpiderCommand.add_options(self, parser)
35 parser.add_argument("--spider", dest="spider", default=None,
36 help="use this spider without looking for one")
37 parser.add_argument("--pipelines", action="store_true",
38 help="process items through pipelines")
39 parser.add_argument("--nolinks", dest="nolinks", action="store_true",
40 help="don't show links to follow (extracted requests)")
41 parser.add_argument("--noitems", dest="noitems", action="store_true",
42 help="don't show scraped items")
43 parser.add_argument("--nocolour", dest="nocolour", action="store_true",
44 help="avoid using pygments to colorize the output")
45 parser.add_argument("-r", "--rules", dest="rules", action="store_true",
46 help="use CrawlSpider rules to discover the callback")
47 parser.add_argument("-c", "--callback", dest="callback",
48 help="use this callback for parsing, instead looking for a callback")
49 parser.add_argument("-m", "--meta", dest="meta",
50 help="inject extra meta into the Request, it must be a valid raw json string")
51 parser.add_argument("--cbkwargs", dest="cbkwargs",
52 help="inject extra callback kwargs into the Request, it must be a valid raw json string")
53 parser.add_argument("-d", "--depth", dest="depth", type=int, default=1,
54 help="maximum depth for parsing requests [default: %(default)s]")
55 parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
56 help="print each depth level one by one")
57
58 @property
59 def max_level(self):
60 max_items, max_requests = 0, 0
61 if self.items:
62 max_items = max(self.items)
63 if self.requests:
64 max_requests = max(self.requests)
65 return max(max_items, max_requests)
66
67 def add_items(self, lvl, new_items):
68 old_items = self.items.get(lvl, [])
69 self.items[lvl] = old_items + new_items
70
71 def add_requests(self, lvl, new_reqs):
72 old_reqs = self.requests.get(lvl, [])
73 self.requests[lvl] = old_reqs + new_reqs
74
75 def print_items(self, lvl=None, colour=True):
76 if lvl is None:
77 items = [item for lst in self.items.values() for item in lst]
78 else:
79 items = self.items.get(lvl, [])
80
81 print("# Scraped Items ", "-" * 60)
82 display.pprint([ItemAdapter(x).asdict() for x in items], colorize=colour)
83
84 def print_requests(self, lvl=None, colour=True):
85 if lvl is None:
86 if self.requests:
87 requests = self.requests[max(self.requests)]
88 else:
89 requests = []
90 else:
91 requests = self.requests.get(lvl, [])
92
93 print("# Requests ", "-" * 65)
94 display.pprint(requests, colorize=colour)
95
96 def print_results(self, opts):
97 colour = not opts.nocolour
98
99 if opts.verbose:
100 for level in range(1, self.max_level + 1):
101 print(f'\n>>> DEPTH LEVEL: {level} <<<')
102 if not opts.noitems:
103 self.print_items(level, colour)
104 if not opts.nolinks:
105 self.print_requests(level, colour)
106 else:
107 print(f'\n>>> STATUS DEPTH LEVEL {self.max_level} <<<')
108 if not opts.noitems:
109 self.print_items(colour=colour)
110 if not opts.nolinks:
111 self.print_requests(colour=colour)
112
113 def run_callback(self, response, callback, cb_kwargs=None):
114 cb_kwargs = cb_kwargs or {}
115 items, requests = [], []
116
117 for x in iterate_spider_output(callback(response, **cb_kwargs)):
118 if is_item(x):
119 items.append(x)
120 elif isinstance(x, Request):
121 requests.append(x)
122 return items, requests
123
124 def get_callback_from_rules(self, spider, response):
125 if getattr(spider, 'rules', None):
126 for rule in spider.rules:
127 if rule.link_extractor.matches(response.url):
128 return rule.callback or "parse"
129 else:
130 logger.error('No CrawlSpider rules found in spider %(spider)r, '
131 'please specify a callback to use for parsing',
132 {'spider': spider.name})
133
134 def set_spidercls(self, url, opts):
135 spider_loader = self.crawler_process.spider_loader
136 if opts.spider:
137 try:
138 self.spidercls = spider_loader.load(opts.spider)
139 except KeyError:
140 logger.error('Unable to find spider: %(spider)s',
141 {'spider': opts.spider})
142 else:
143 self.spidercls = spidercls_for_request(spider_loader, Request(url))
144 if not self.spidercls:
145 logger.error('Unable to find spider for: %(url)s', {'url': url})
146
147 def _start_requests(spider):
148 yield self.prepare_request(spider, Request(url), opts)
149 if self.spidercls:
150 self.spidercls.start_requests = _start_requests
151
152 def start_parsing(self, url, opts):
153 self.crawler_process.crawl(self.spidercls, **opts.spargs)
154 self.pcrawler = list(self.crawler_process.crawlers)[0]
155 self.crawler_process.start()
156
157 if not self.first_response:
158 logger.error('No response downloaded for: %(url)s',
159 {'url': url})
160
161 def prepare_request(self, spider, request, opts):
162 def callback(response, **cb_kwargs):
163 # memorize first request
164 if not self.first_response:
165 self.first_response = response
166
167 # determine real callback
168 cb = response.meta['_callback']
169 if not cb:
170 if opts.callback:
171 cb = opts.callback
172 elif opts.rules and self.first_response == response:
173 cb = self.get_callback_from_rules(spider, response)
174
175 if not cb:
176 logger.error('Cannot find a rule that matches %(url)r in spider: %(spider)s',
177 {'url': response.url, 'spider': spider.name})
178 return
179 else:
180 cb = 'parse'
181
182 if not callable(cb):
183 cb_method = getattr(spider, cb, None)
184 if callable(cb_method):
185 cb = cb_method
186 else:
187 logger.error('Cannot find callback %(callback)r in spider: %(spider)s',
188 {'callback': cb, 'spider': spider.name})
189 return
190
191 # parse items and requests
192 depth = response.meta['_depth']
193
194 items, requests = self.run_callback(response, cb, cb_kwargs)
195 if opts.pipelines:
196 itemproc = self.pcrawler.engine.scraper.itemproc
197 for item in items:
198 itemproc.process_item(item, spider)
199 self.add_items(depth, items)
200 self.add_requests(depth, requests)
201
202 scraped_data = items if opts.output else []
203 if depth < opts.depth:
204 for req in requests:
205 req.meta['_depth'] = depth + 1
206 req.meta['_callback'] = req.callback
207 req.callback = callback
208 scraped_data += requests
209
210 return scraped_data
211
212 # update request meta if any extra meta was passed through the --meta/-m opts.
213 if opts.meta:
214 request.meta.update(opts.meta)
215
216 # update cb_kwargs if any extra values were was passed through the --cbkwargs option.
217 if opts.cbkwargs:
218 request.cb_kwargs.update(opts.cbkwargs)
219
220 request.meta['_depth'] = 1
221 request.meta['_callback'] = request.callback
222 request.callback = callback
223 return request
224
225 def process_options(self, args, opts):
226 BaseRunSpiderCommand.process_options(self, args, opts)
227
228 self.process_request_meta(opts)
229 self.process_request_cb_kwargs(opts)
230
231 def process_request_meta(self, opts):
232 if opts.meta:
233 try:
234 opts.meta = json.loads(opts.meta)
235 except ValueError:
236 raise UsageError("Invalid -m/--meta value, pass a valid json string to -m or --meta. "
237 "Example: --meta='{\"foo\" : \"bar\"}'", print_help=False)
238
239 def process_request_cb_kwargs(self, opts):
240 if opts.cbkwargs:
241 try:
242 opts.cbkwargs = json.loads(opts.cbkwargs)
243 except ValueError:
244 raise UsageError("Invalid --cbkwargs value, pass a valid json string to --cbkwargs. "
245 "Example: --cbkwargs='{\"foo\" : \"bar\"}'", print_help=False)
246
247 def run(self, args, opts):
248 # parse arguments
249 if not len(args) == 1 or not is_url(args[0]):
250 raise UsageError()
251 else:
252 url = args[0]
253
254 # prepare spidercls
255 self.set_spidercls(url, opts)
256
257 if self.spidercls and opts.depth > 0:
258 self.start_parsing(url, opts)
259 self.print_results(opts)
260
[end of scrapy/commands/parse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/commands/parse.py b/scrapy/commands/parse.py
--- a/scrapy/commands/parse.py
+++ b/scrapy/commands/parse.py
@@ -5,6 +5,8 @@
from itemadapter import is_item, ItemAdapter
from w3lib.url import is_url
+from twisted.internet.defer import maybeDeferred
+
from scrapy.commands import BaseRunSpiderCommand
from scrapy.http import Request
from scrapy.utils import display
@@ -110,16 +112,19 @@
if not opts.nolinks:
self.print_requests(colour=colour)
- def run_callback(self, response, callback, cb_kwargs=None):
- cb_kwargs = cb_kwargs or {}
+ def _get_items_and_requests(self, spider_output, opts, depth, spider, callback):
items, requests = [], []
-
- for x in iterate_spider_output(callback(response, **cb_kwargs)):
+ for x in spider_output:
if is_item(x):
items.append(x)
elif isinstance(x, Request):
requests.append(x)
- return items, requests
+ return items, requests, opts, depth, spider, callback
+
+ def run_callback(self, response, callback, cb_kwargs=None):
+ cb_kwargs = cb_kwargs or {}
+ d = maybeDeferred(iterate_spider_output, callback(response, **cb_kwargs))
+ return d
def get_callback_from_rules(self, spider, response):
if getattr(spider, 'rules', None):
@@ -158,6 +163,25 @@
logger.error('No response downloaded for: %(url)s',
{'url': url})
+ def scraped_data(self, args):
+ items, requests, opts, depth, spider, callback = args
+ if opts.pipelines:
+ itemproc = self.pcrawler.engine.scraper.itemproc
+ for item in items:
+ itemproc.process_item(item, spider)
+ self.add_items(depth, items)
+ self.add_requests(depth, requests)
+
+ scraped_data = items if opts.output else []
+ if depth < opts.depth:
+ for req in requests:
+ req.meta['_depth'] = depth + 1
+ req.meta['_callback'] = req.callback
+ req.callback = callback
+ scraped_data += requests
+
+ return scraped_data
+
def prepare_request(self, spider, request, opts):
def callback(response, **cb_kwargs):
# memorize first request
@@ -191,23 +215,10 @@
# parse items and requests
depth = response.meta['_depth']
- items, requests = self.run_callback(response, cb, cb_kwargs)
- if opts.pipelines:
- itemproc = self.pcrawler.engine.scraper.itemproc
- for item in items:
- itemproc.process_item(item, spider)
- self.add_items(depth, items)
- self.add_requests(depth, requests)
-
- scraped_data = items if opts.output else []
- if depth < opts.depth:
- for req in requests:
- req.meta['_depth'] = depth + 1
- req.meta['_callback'] = req.callback
- req.callback = callback
- scraped_data += requests
-
- return scraped_data
+ d = self.run_callback(response, cb, cb_kwargs)
+ d.addCallback(self._get_items_and_requests, opts, depth, spider, callback)
+ d.addCallback(self.scraped_data)
+ return d
# update request meta if any extra meta was passed through the --meta/-m opts.
if opts.meta:
|
{"golden_diff": "diff --git a/scrapy/commands/parse.py b/scrapy/commands/parse.py\n--- a/scrapy/commands/parse.py\n+++ b/scrapy/commands/parse.py\n@@ -5,6 +5,8 @@\n from itemadapter import is_item, ItemAdapter\n from w3lib.url import is_url\n \n+from twisted.internet.defer import maybeDeferred\n+\n from scrapy.commands import BaseRunSpiderCommand\n from scrapy.http import Request\n from scrapy.utils import display\n@@ -110,16 +112,19 @@\n if not opts.nolinks:\n self.print_requests(colour=colour)\n \n- def run_callback(self, response, callback, cb_kwargs=None):\n- cb_kwargs = cb_kwargs or {}\n+ def _get_items_and_requests(self, spider_output, opts, depth, spider, callback):\n items, requests = [], []\n-\n- for x in iterate_spider_output(callback(response, **cb_kwargs)):\n+ for x in spider_output:\n if is_item(x):\n items.append(x)\n elif isinstance(x, Request):\n requests.append(x)\n- return items, requests\n+ return items, requests, opts, depth, spider, callback\n+\n+ def run_callback(self, response, callback, cb_kwargs=None):\n+ cb_kwargs = cb_kwargs or {}\n+ d = maybeDeferred(iterate_spider_output, callback(response, **cb_kwargs))\n+ return d\n \n def get_callback_from_rules(self, spider, response):\n if getattr(spider, 'rules', None):\n@@ -158,6 +163,25 @@\n logger.error('No response downloaded for: %(url)s',\n {'url': url})\n \n+ def scraped_data(self, args):\n+ items, requests, opts, depth, spider, callback = args\n+ if opts.pipelines:\n+ itemproc = self.pcrawler.engine.scraper.itemproc\n+ for item in items:\n+ itemproc.process_item(item, spider)\n+ self.add_items(depth, items)\n+ self.add_requests(depth, requests)\n+\n+ scraped_data = items if opts.output else []\n+ if depth < opts.depth:\n+ for req in requests:\n+ req.meta['_depth'] = depth + 1\n+ req.meta['_callback'] = req.callback\n+ req.callback = callback\n+ scraped_data += requests\n+\n+ return scraped_data\n+\n def prepare_request(self, spider, request, opts):\n def callback(response, **cb_kwargs):\n # memorize first request\n@@ -191,23 +215,10 @@\n # parse items and requests\n depth = response.meta['_depth']\n \n- items, requests = self.run_callback(response, cb, cb_kwargs)\n- if opts.pipelines:\n- itemproc = self.pcrawler.engine.scraper.itemproc\n- for item in items:\n- itemproc.process_item(item, spider)\n- self.add_items(depth, items)\n- self.add_requests(depth, requests)\n-\n- scraped_data = items if opts.output else []\n- if depth < opts.depth:\n- for req in requests:\n- req.meta['_depth'] = depth + 1\n- req.meta['_callback'] = req.callback\n- req.callback = callback\n- scraped_data += requests\n-\n- return scraped_data\n+ d = self.run_callback(response, cb, cb_kwargs)\n+ d.addCallback(self._get_items_and_requests, opts, depth, spider, callback)\n+ d.addCallback(self.scraped_data)\n+ return d\n \n # update request meta if any extra meta was passed through the --meta/-m opts.\n if opts.meta:\n", "issue": "scrapy parse doesn't support async callbacks\nIn master when running `scrapy parse` for a spider with `async def parse` the page is downloaded but then the spider hangs. In #4978 it instead raises `TypeError: 'async_generator' object is not iterable`. Both problems happen because the parse command calls `iterate_spider_output` and doesn't expect a Deferred or an async iterator. \nscrapy parse doesn't support async callbacks\nIn master when running `scrapy parse` for a spider with `async def parse` the page is downloaded but then the spider hangs. 
In #4978 it instead raises `TypeError: 'async_generator' object is not iterable`. Both problems happen because the parse command calls `iterate_spider_output` and doesn't expect a Deferred or an async iterator. \n", "before_files": [{"content": "import json\nimport logging\nfrom typing import Dict\n\nfrom itemadapter import is_item, ItemAdapter\nfrom w3lib.url import is_url\n\nfrom scrapy.commands import BaseRunSpiderCommand\nfrom scrapy.http import Request\nfrom scrapy.utils import display\nfrom scrapy.utils.spider import iterate_spider_output, spidercls_for_request\nfrom scrapy.exceptions import UsageError\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseRunSpiderCommand):\n requires_project = True\n\n spider = None\n items: Dict[int, list] = {}\n requests: Dict[int, list] = {}\n\n first_response = None\n\n def syntax(self):\n return \"[options] <url>\"\n\n def short_desc(self):\n return \"Parse URL (using its spider) and print the results\"\n\n def add_options(self, parser):\n BaseRunSpiderCommand.add_options(self, parser)\n parser.add_argument(\"--spider\", dest=\"spider\", default=None,\n help=\"use this spider without looking for one\")\n parser.add_argument(\"--pipelines\", action=\"store_true\",\n help=\"process items through pipelines\")\n parser.add_argument(\"--nolinks\", dest=\"nolinks\", action=\"store_true\",\n help=\"don't show links to follow (extracted requests)\")\n parser.add_argument(\"--noitems\", dest=\"noitems\", action=\"store_true\",\n help=\"don't show scraped items\")\n parser.add_argument(\"--nocolour\", dest=\"nocolour\", action=\"store_true\",\n help=\"avoid using pygments to colorize the output\")\n parser.add_argument(\"-r\", \"--rules\", dest=\"rules\", action=\"store_true\",\n help=\"use CrawlSpider rules to discover the callback\")\n parser.add_argument(\"-c\", \"--callback\", dest=\"callback\",\n help=\"use this callback for parsing, instead looking for a callback\")\n parser.add_argument(\"-m\", \"--meta\", dest=\"meta\",\n help=\"inject extra meta into the Request, it must be a valid raw json string\")\n parser.add_argument(\"--cbkwargs\", dest=\"cbkwargs\",\n help=\"inject extra callback kwargs into the Request, it must be a valid raw json string\")\n parser.add_argument(\"-d\", \"--depth\", dest=\"depth\", type=int, default=1,\n help=\"maximum depth for parsing requests [default: %(default)s]\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\",\n help=\"print each depth level one by one\")\n\n @property\n def max_level(self):\n max_items, max_requests = 0, 0\n if self.items:\n max_items = max(self.items)\n if self.requests:\n max_requests = max(self.requests)\n return max(max_items, max_requests)\n\n def add_items(self, lvl, new_items):\n old_items = self.items.get(lvl, [])\n self.items[lvl] = old_items + new_items\n\n def add_requests(self, lvl, new_reqs):\n old_reqs = self.requests.get(lvl, [])\n self.requests[lvl] = old_reqs + new_reqs\n\n def print_items(self, lvl=None, colour=True):\n if lvl is None:\n items = [item for lst in self.items.values() for item in lst]\n else:\n items = self.items.get(lvl, [])\n\n print(\"# Scraped Items \", \"-\" * 60)\n display.pprint([ItemAdapter(x).asdict() for x in items], colorize=colour)\n\n def print_requests(self, lvl=None, colour=True):\n if lvl is None:\n if self.requests:\n requests = self.requests[max(self.requests)]\n else:\n requests = []\n else:\n requests = self.requests.get(lvl, [])\n\n print(\"# Requests \", \"-\" * 65)\n display.pprint(requests, 
colorize=colour)\n\n def print_results(self, opts):\n colour = not opts.nocolour\n\n if opts.verbose:\n for level in range(1, self.max_level + 1):\n print(f'\\n>>> DEPTH LEVEL: {level} <<<')\n if not opts.noitems:\n self.print_items(level, colour)\n if not opts.nolinks:\n self.print_requests(level, colour)\n else:\n print(f'\\n>>> STATUS DEPTH LEVEL {self.max_level} <<<')\n if not opts.noitems:\n self.print_items(colour=colour)\n if not opts.nolinks:\n self.print_requests(colour=colour)\n\n def run_callback(self, response, callback, cb_kwargs=None):\n cb_kwargs = cb_kwargs or {}\n items, requests = [], []\n\n for x in iterate_spider_output(callback(response, **cb_kwargs)):\n if is_item(x):\n items.append(x)\n elif isinstance(x, Request):\n requests.append(x)\n return items, requests\n\n def get_callback_from_rules(self, spider, response):\n if getattr(spider, 'rules', None):\n for rule in spider.rules:\n if rule.link_extractor.matches(response.url):\n return rule.callback or \"parse\"\n else:\n logger.error('No CrawlSpider rules found in spider %(spider)r, '\n 'please specify a callback to use for parsing',\n {'spider': spider.name})\n\n def set_spidercls(self, url, opts):\n spider_loader = self.crawler_process.spider_loader\n if opts.spider:\n try:\n self.spidercls = spider_loader.load(opts.spider)\n except KeyError:\n logger.error('Unable to find spider: %(spider)s',\n {'spider': opts.spider})\n else:\n self.spidercls = spidercls_for_request(spider_loader, Request(url))\n if not self.spidercls:\n logger.error('Unable to find spider for: %(url)s', {'url': url})\n\n def _start_requests(spider):\n yield self.prepare_request(spider, Request(url), opts)\n if self.spidercls:\n self.spidercls.start_requests = _start_requests\n\n def start_parsing(self, url, opts):\n self.crawler_process.crawl(self.spidercls, **opts.spargs)\n self.pcrawler = list(self.crawler_process.crawlers)[0]\n self.crawler_process.start()\n\n if not self.first_response:\n logger.error('No response downloaded for: %(url)s',\n {'url': url})\n\n def prepare_request(self, spider, request, opts):\n def callback(response, **cb_kwargs):\n # memorize first request\n if not self.first_response:\n self.first_response = response\n\n # determine real callback\n cb = response.meta['_callback']\n if not cb:\n if opts.callback:\n cb = opts.callback\n elif opts.rules and self.first_response == response:\n cb = self.get_callback_from_rules(spider, response)\n\n if not cb:\n logger.error('Cannot find a rule that matches %(url)r in spider: %(spider)s',\n {'url': response.url, 'spider': spider.name})\n return\n else:\n cb = 'parse'\n\n if not callable(cb):\n cb_method = getattr(spider, cb, None)\n if callable(cb_method):\n cb = cb_method\n else:\n logger.error('Cannot find callback %(callback)r in spider: %(spider)s',\n {'callback': cb, 'spider': spider.name})\n return\n\n # parse items and requests\n depth = response.meta['_depth']\n\n items, requests = self.run_callback(response, cb, cb_kwargs)\n if opts.pipelines:\n itemproc = self.pcrawler.engine.scraper.itemproc\n for item in items:\n itemproc.process_item(item, spider)\n self.add_items(depth, items)\n self.add_requests(depth, requests)\n\n scraped_data = items if opts.output else []\n if depth < opts.depth:\n for req in requests:\n req.meta['_depth'] = depth + 1\n req.meta['_callback'] = req.callback\n req.callback = callback\n scraped_data += requests\n\n return scraped_data\n\n # update request meta if any extra meta was passed through the --meta/-m opts.\n if opts.meta:\n 
request.meta.update(opts.meta)\n\n # update cb_kwargs if any extra values were was passed through the --cbkwargs option.\n if opts.cbkwargs:\n request.cb_kwargs.update(opts.cbkwargs)\n\n request.meta['_depth'] = 1\n request.meta['_callback'] = request.callback\n request.callback = callback\n return request\n\n def process_options(self, args, opts):\n BaseRunSpiderCommand.process_options(self, args, opts)\n\n self.process_request_meta(opts)\n self.process_request_cb_kwargs(opts)\n\n def process_request_meta(self, opts):\n if opts.meta:\n try:\n opts.meta = json.loads(opts.meta)\n except ValueError:\n raise UsageError(\"Invalid -m/--meta value, pass a valid json string to -m or --meta. \"\n \"Example: --meta='{\\\"foo\\\" : \\\"bar\\\"}'\", print_help=False)\n\n def process_request_cb_kwargs(self, opts):\n if opts.cbkwargs:\n try:\n opts.cbkwargs = json.loads(opts.cbkwargs)\n except ValueError:\n raise UsageError(\"Invalid --cbkwargs value, pass a valid json string to --cbkwargs. \"\n \"Example: --cbkwargs='{\\\"foo\\\" : \\\"bar\\\"}'\", print_help=False)\n\n def run(self, args, opts):\n # parse arguments\n if not len(args) == 1 or not is_url(args[0]):\n raise UsageError()\n else:\n url = args[0]\n\n # prepare spidercls\n self.set_spidercls(url, opts)\n\n if self.spidercls and opts.depth > 0:\n self.start_parsing(url, opts)\n self.print_results(opts)\n", "path": "scrapy/commands/parse.py"}]}
| 3,490 | 795 |
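Aside on the scrapy example above: the golden diff fixes the hang by routing the callback's output through Twisted's `maybeDeferred`, so a plain iterable and a `Deferred` end up on the same code path before items and requests are collected. The sketch below isolates that idea in a simplified form; it assumes Twisted is installed, and `sync_parse`, `deferred_parse`, and the fake response string are invented stand-ins rather than scrapy APIs.

```python
from twisted.internet.defer import maybeDeferred, succeed

def sync_parse(response):
    # behaves like an ordinary generator-style parse() callback
    yield {"seen": response}

def deferred_parse(response):
    # behaves like a callback whose output only arrives via a Deferred
    return succeed([{"seen": response}])

def collect(spider_output):
    # rough stand-in for _get_items_and_requests: just materialise the output
    return list(spider_output)

for cb in (sync_parse, deferred_parse):
    d = maybeDeferred(cb, "http://example.com/fake")
    d.addCallback(collect)
    d.addCallback(lambda items, name=cb.__name__: print(name, items))
```

Either way, `collect` receives an iterable it can walk synchronously, which is the property the patched `prepare_request` relies on when it chains `addCallback` calls instead of iterating the callback result directly.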
gh_patches_debug_14959
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-4439
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Gnome-initial-setup is broken
https://build.gnome.org/continuous/buildmaster/builds/2018/10/25/27/build/log-gnome-initial-setup.txt
Git bisect says the cause is this: https://github.com/mesonbuild/meson/commit/176af2c8c33
</issue>
<code>
[start of mesonbuild/modules/i18n.py]
1 # Copyright 2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import shutil
16
17 from os import path
18 from .. import coredata, mesonlib, build
19 from ..mesonlib import MesonException
20 from . import ModuleReturnValue
21 from . import ExtensionModule
22 from ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs
23
24 PRESET_ARGS = {
25 'glib': [
26 '--from-code=UTF-8',
27 '--add-comments',
28
29 # https://developer.gnome.org/glib/stable/glib-I18N.html
30 '--keyword=_',
31 '--keyword=N_',
32 '--keyword=C_:1c,2',
33 '--keyword=NC_:1c,2',
34 '--keyword=g_dcgettext:2',
35 '--keyword=g_dngettext:2,3',
36 '--keyword=g_dpgettext2:2c,3',
37
38 '--flag=N_:1:pass-c-format',
39 '--flag=C_:2:pass-c-format',
40 '--flag=NC_:2:pass-c-format',
41 '--flag=g_dngettext:2:pass-c-format',
42 '--flag=g_strdup_printf:1:c-format',
43 '--flag=g_string_printf:2:c-format',
44 '--flag=g_string_append_printf:2:c-format',
45 '--flag=g_error_new:3:c-format',
46 '--flag=g_set_error:4:c-format',
47 ]
48 }
49
50 class I18nModule(ExtensionModule):
51
52 @staticmethod
53 def _get_data_dirs(state, dirs):
54 """Returns source directories of relative paths"""
55 src_dir = path.join(state.environment.get_source_dir(), state.subdir)
56 return [path.join(src_dir, d) for d in dirs]
57
58 @FeatureNew('i18n.merge_file', '0.37.0')
59 @permittedKwargs({'languages', 'data_dirs', 'preset', 'args', 'po_dir', 'type',
60 'input', 'output', 'install', 'install_dir'})
61 def merge_file(self, state, args, kwargs):
62 podir = kwargs.pop('po_dir', None)
63 if not podir:
64 raise MesonException('i18n: po_dir is a required kwarg')
65 podir = path.join(state.build_to_src, state.subdir, podir)
66
67 file_type = kwargs.pop('type', 'xml')
68 VALID_TYPES = ('xml', 'desktop')
69 if file_type not in VALID_TYPES:
70 raise MesonException('i18n: "{}" is not a valid type {}'.format(file_type, VALID_TYPES))
71
72 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))
73 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None
74
75 command = state.environment.get_build_command() + [
76 '--internal', 'msgfmthelper',
77 '@INPUT@', '@OUTPUT@', file_type, podir
78 ]
79 if datadirs:
80 command.append(datadirs)
81
82 kwargs['command'] = command
83
84 inputfile = kwargs['input']
85 if hasattr(inputfile, 'held_object'):
86 ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)
87 else:
88 if isinstance(inputfile, str):
89 inputfile = mesonlib.File.from_source_file(state.environment.source_dir,
90 state.subdir, inputfile)
91 output = kwargs['output']
92 ifile_abs = inputfile.absolute_path(state.environment.source_dir,
93 state.environment.build_dir)
94 values = mesonlib.get_filenames_templates_dict([ifile_abs], None)
95 outputs = mesonlib.substitute_values([output], values)
96 output = outputs[0]
97 ct = build.CustomTarget(output + '_' + state.subdir.replace('/', '@').replace('\\', '@') + '_merge', state.subdir, state.subproject, kwargs)
98 return ModuleReturnValue(ct, [ct])
99
100 @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])
101 @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install'})
102 def gettext(self, state, args, kwargs):
103 if len(args) != 1:
104 raise coredata.MesonException('Gettext requires one positional argument (package name).')
105 if not shutil.which('xgettext'):
106 raise coredata.MesonException('Can not do gettext because xgettext is not installed.')
107 packagename = args[0]
108 languages = mesonlib.stringlistify(kwargs.get('languages', []))
109 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))
110 extra_args = mesonlib.stringlistify(kwargs.get('args', []))
111
112 preset = kwargs.pop('preset', None)
113 if preset:
114 preset_args = PRESET_ARGS.get(preset)
115 if not preset_args:
116 raise coredata.MesonException('i18n: Preset "{}" is not one of the valid options: {}'.format(
117 preset, list(PRESET_ARGS.keys())))
118 extra_args = set(preset_args + extra_args)
119
120 pkg_arg = '--pkgname=' + packagename
121 lang_arg = '--langs=' + '@@'.join(languages) if languages else None
122 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None
123 extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None
124
125 potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]
126 if datadirs:
127 potargs.append(datadirs)
128 if extra_args:
129 potargs.append(extra_args)
130 pottarget = build.RunTarget(packagename + '-pot', potargs[0], potargs[1:], [], state.subdir, state.subproject)
131
132 gmoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'gen_gmo']
133 if lang_arg:
134 gmoargs.append(lang_arg)
135 gmotarget = build.RunTarget(packagename + '-gmo', gmoargs[0], gmoargs[1:], [], state.subdir, state.subproject)
136
137 updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]
138 if lang_arg:
139 updatepoargs.append(lang_arg)
140 if datadirs:
141 updatepoargs.append(datadirs)
142 if extra_args:
143 updatepoargs.append(extra_args)
144 updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs[0], updatepoargs[1:], [], state.subdir, state.subproject)
145
146 targets = [pottarget, gmotarget, updatepotarget]
147
148 install = kwargs.get('install', True)
149 if install:
150 script = state.environment.get_build_command()
151 args = ['--internal', 'gettext', 'install',
152 '--subdir=' + state.subdir,
153 '--localedir=' + state.environment.coredata.get_builtin_option('localedir'),
154 pkg_arg]
155 if lang_arg:
156 args.append(lang_arg)
157 iscript = build.RunScript(script, args)
158 targets.append(iscript)
159
160 return ModuleReturnValue(None, targets)
161
162 def initialize(*args, **kwargs):
163 return I18nModule(*args, **kwargs)
164
[end of mesonbuild/modules/i18n.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mesonbuild/modules/i18n.py b/mesonbuild/modules/i18n.py
--- a/mesonbuild/modules/i18n.py
+++ b/mesonbuild/modules/i18n.py
@@ -85,6 +85,10 @@
if hasattr(inputfile, 'held_object'):
ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)
else:
+ if isinstance(inputfile, list):
+ # We only use this input file to create a name of the custom target.
+ # Thus we can ignore the other entries.
+ inputfile = inputfile[0]
if isinstance(inputfile, str):
inputfile = mesonlib.File.from_source_file(state.environment.source_dir,
state.subdir, inputfile)
|
{"golden_diff": "diff --git a/mesonbuild/modules/i18n.py b/mesonbuild/modules/i18n.py\n--- a/mesonbuild/modules/i18n.py\n+++ b/mesonbuild/modules/i18n.py\n@@ -85,6 +85,10 @@\n if hasattr(inputfile, 'held_object'):\n ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)\n else:\n+ if isinstance(inputfile, list):\n+ # We only use this input file to create a name of the custom target.\n+ # Thus we can ignore the other entries.\n+ inputfile = inputfile[0]\n if isinstance(inputfile, str):\n inputfile = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, inputfile)\n", "issue": "Gnome-initial-setup is broken\nhttps://build.gnome.org/continuous/buildmaster/builds/2018/10/25/27/build/log-gnome-initial-setup.txt\r\n\r\nGit bisect says the cause is this: https://github.com/mesonbuild/meson/commit/176af2c8c33\n", "before_files": [{"content": "# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shutil\n\nfrom os import path\nfrom .. import coredata, mesonlib, build\nfrom ..mesonlib import MesonException\nfrom . import ModuleReturnValue\nfrom . import ExtensionModule\nfrom ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs\n\nPRESET_ARGS = {\n 'glib': [\n '--from-code=UTF-8',\n '--add-comments',\n\n # https://developer.gnome.org/glib/stable/glib-I18N.html\n '--keyword=_',\n '--keyword=N_',\n '--keyword=C_:1c,2',\n '--keyword=NC_:1c,2',\n '--keyword=g_dcgettext:2',\n '--keyword=g_dngettext:2,3',\n '--keyword=g_dpgettext2:2c,3',\n\n '--flag=N_:1:pass-c-format',\n '--flag=C_:2:pass-c-format',\n '--flag=NC_:2:pass-c-format',\n '--flag=g_dngettext:2:pass-c-format',\n '--flag=g_strdup_printf:1:c-format',\n '--flag=g_string_printf:2:c-format',\n '--flag=g_string_append_printf:2:c-format',\n '--flag=g_error_new:3:c-format',\n '--flag=g_set_error:4:c-format',\n ]\n}\n\nclass I18nModule(ExtensionModule):\n\n @staticmethod\n def _get_data_dirs(state, dirs):\n \"\"\"Returns source directories of relative paths\"\"\"\n src_dir = path.join(state.environment.get_source_dir(), state.subdir)\n return [path.join(src_dir, d) for d in dirs]\n\n @FeatureNew('i18n.merge_file', '0.37.0')\n @permittedKwargs({'languages', 'data_dirs', 'preset', 'args', 'po_dir', 'type',\n 'input', 'output', 'install', 'install_dir'})\n def merge_file(self, state, args, kwargs):\n podir = kwargs.pop('po_dir', None)\n if not podir:\n raise MesonException('i18n: po_dir is a required kwarg')\n podir = path.join(state.build_to_src, state.subdir, podir)\n\n file_type = kwargs.pop('type', 'xml')\n VALID_TYPES = ('xml', 'desktop')\n if file_type not in VALID_TYPES:\n raise MesonException('i18n: \"{}\" is not a valid type {}'.format(file_type, VALID_TYPES))\n\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n\n command = state.environment.get_build_command() + [\n '--internal', 'msgfmthelper',\n '@INPUT@', 
'@OUTPUT@', file_type, podir\n ]\n if datadirs:\n command.append(datadirs)\n\n kwargs['command'] = command\n\n inputfile = kwargs['input']\n if hasattr(inputfile, 'held_object'):\n ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)\n else:\n if isinstance(inputfile, str):\n inputfile = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, inputfile)\n output = kwargs['output']\n ifile_abs = inputfile.absolute_path(state.environment.source_dir,\n state.environment.build_dir)\n values = mesonlib.get_filenames_templates_dict([ifile_abs], None)\n outputs = mesonlib.substitute_values([output], values)\n output = outputs[0]\n ct = build.CustomTarget(output + '_' + state.subdir.replace('/', '@').replace('\\\\', '@') + '_merge', state.subdir, state.subproject, kwargs)\n return ModuleReturnValue(ct, [ct])\n\n @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])\n @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install'})\n def gettext(self, state, args, kwargs):\n if len(args) != 1:\n raise coredata.MesonException('Gettext requires one positional argument (package name).')\n if not shutil.which('xgettext'):\n raise coredata.MesonException('Can not do gettext because xgettext is not installed.')\n packagename = args[0]\n languages = mesonlib.stringlistify(kwargs.get('languages', []))\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n\n preset = kwargs.pop('preset', None)\n if preset:\n preset_args = PRESET_ARGS.get(preset)\n if not preset_args:\n raise coredata.MesonException('i18n: Preset \"{}\" is not one of the valid options: {}'.format(\n preset, list(PRESET_ARGS.keys())))\n extra_args = set(preset_args + extra_args)\n\n pkg_arg = '--pkgname=' + packagename\n lang_arg = '--langs=' + '@@'.join(languages) if languages else None\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None\n\n potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]\n if datadirs:\n potargs.append(datadirs)\n if extra_args:\n potargs.append(extra_args)\n pottarget = build.RunTarget(packagename + '-pot', potargs[0], potargs[1:], [], state.subdir, state.subproject)\n\n gmoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'gen_gmo']\n if lang_arg:\n gmoargs.append(lang_arg)\n gmotarget = build.RunTarget(packagename + '-gmo', gmoargs[0], gmoargs[1:], [], state.subdir, state.subproject)\n\n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n if lang_arg:\n updatepoargs.append(lang_arg)\n if datadirs:\n updatepoargs.append(datadirs)\n if extra_args:\n updatepoargs.append(extra_args)\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs[0], updatepoargs[1:], [], state.subdir, state.subproject)\n\n targets = [pottarget, gmotarget, updatepotarget]\n\n install = kwargs.get('install', True)\n if install:\n script = state.environment.get_build_command()\n args = ['--internal', 'gettext', 'install',\n '--subdir=' + state.subdir,\n '--localedir=' + state.environment.coredata.get_builtin_option('localedir'),\n pkg_arg]\n if lang_arg:\n args.append(lang_arg)\n iscript = build.RunScript(script, args)\n targets.append(iscript)\n\n return ModuleReturnValue(None, targets)\n\ndef initialize(*args, **kwargs):\n return 
I18nModule(*args, **kwargs)\n", "path": "mesonbuild/modules/i18n.py"}]}
| 2,729 | 178 |
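Aside on the meson example above: the whole fix is the four-line guard at the top of the hunk — `merge_file()` can legitimately be handed a list for `input`, and the code below the guard only needs a single file name to work out what to call the custom target. A reduced sketch of that guard, with invented file names and a deliberately simplified name-building step:

```python
def merge_target_name(inputfile, output, subdir):
    if isinstance(inputfile, list):
        # Per the patch comment: the input is only used to name the target,
        # so the remaining entries can be ignored.
        inputfile = inputfile[0]
    assert isinstance(inputfile, str), type(inputfile)
    escaped_subdir = subdir.replace('/', '@').replace('\\', '@')
    return '{}_{}_merge'.format(output, escaped_subdir)

print(merge_target_name(['setup.desktop.in', 'extra.desktop.in'],
                        'gnome-initial-setup.desktop', 'data'))
```

Without the guard, a list value falls through to the branch that treats `inputfile` as a single file object, which is presumably what broke the gnome-initial-setup build referenced in the issue.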
gh_patches_debug_20513
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-1896
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'
https://beta.getsentry.com/sentry/sentry/group/56738068/
```
AttributeError: 'int' object has no attribute 'lower'
(11 additional frame(s) were not displayed)
...
File "sentry/interfaces/user.py", line 99, in to_email_html
return render_to_string('sentry/partial/interfaces/user_email.html', context)
File "sentry/web/helpers.py", line 122, in render_to_string
return loader.render_to_string(template, context)
File "templatetag_sugar/node.py", line 19, in render
return self.function(context, *args, **kwargs)
File "sentry/templatetags/sentry_helpers.py", line 314, in gravatar_url
return get_gravatar_url(email, size, default)
File "sentry/utils/avatar.py", line 18, in get_gravatar_url
hashlib.md5(email.lower()).hexdigest())
Stacktrace (most recent call last):
Error processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'
File "sentry/partial/interfaces/user_email.html", line 7
<tr>
{% if user_email %}
<td style="width:84px;padding-top:10px;vertical-align:top">
<img src="{% gravatar_url user_email size 64 %}">
</td>
{% endif %}
<td>
```
</issue>
<code>
[start of src/sentry/interfaces/user.py]
1 """
2 sentry.interfaces.user
3 ~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import
9
10 __all__ = ('User',)
11
12 from sentry.interfaces.base import Interface
13 from sentry.utils.safe import trim, trim_dict
14 from sentry.web.helpers import render_to_string
15 from ipaddr import IPAddress
16
17
18 def validate_ip(value, required=True):
19 if not required and not value:
20 return
21
22 # will raise a ValueError
23 IPAddress(value)
24 return value
25
26
27 class User(Interface):
28 """
29 An interface which describes the authenticated User for a request.
30
31 You should provide **at least** either an `id` (a unique identifier for
32 an authenticated user) or `ip_address` (their IP address).
33
34 All other attributes are optional.
35
36 >>> {
37 >>> "id": "unique_id",
38 >>> "username": "my_user",
39 >>> "email": "[email protected]"
40 >>> "ip_address": "127.0.0.1",
41 >>> "optional": "value"
42 >>> }
43 """
44 @classmethod
45 def to_python(cls, data):
46 data = data.copy()
47
48 extra_data = data.pop('data', data)
49 if not isinstance(extra_data, dict):
50 extra_data = {}
51
52 kwargs = {
53 'id': trim(data.pop('id', None), 128),
54 'email': trim(data.pop('email', None), 128),
55 'username': trim(data.pop('username', None), 128),
56 'ip_address': validate_ip(data.pop('ip_address', None), False),
57 }
58
59 kwargs['data'] = trim_dict(extra_data)
60 return cls(**kwargs)
61
62 def get_api_context(self, is_public=False):
63 return {
64 'id': self.id,
65 'email': self.email,
66 'username': self.username,
67 'ipAddress': self.ip_address,
68 'data': self.data,
69 }
70
71 def get_path(self):
72 return 'sentry.interfaces.User'
73
74 def get_hash(self):
75 return []
76
77 def to_email_html(self, event, **kwargs):
78 context = {
79 'user_id': self.id,
80 'user_email': self.email,
81 'user_username': self.username,
82 'user_ip_address': self.ip_address,
83 'user_data': self.data,
84 }
85 return render_to_string('sentry/partial/interfaces/user_email.html', context)
86
[end of src/sentry/interfaces/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/interfaces/user.py b/src/sentry/interfaces/user.py
--- a/src/sentry/interfaces/user.py
+++ b/src/sentry/interfaces/user.py
@@ -15,6 +15,19 @@
from ipaddr import IPAddress
+def validate_email(value, required=True):
+ if not required and not value:
+ return
+
+ if not isinstance(value, basestring):
+ raise TypeError('object of type %r is not an email address' % type(value).__name__)
+
+ # safe to assume an email address at least has a @ in it.
+ if '@' not in value:
+ raise ValueError('malformed email address')
+ return value
+
+
def validate_ip(value, required=True):
if not required and not value:
return
@@ -51,7 +64,7 @@
kwargs = {
'id': trim(data.pop('id', None), 128),
- 'email': trim(data.pop('email', None), 128),
+ 'email': trim(validate_email(data.pop('email', None), False), 128),
'username': trim(data.pop('username', None), 128),
'ip_address': validate_ip(data.pop('ip_address', None), False),
}
|
{"golden_diff": "diff --git a/src/sentry/interfaces/user.py b/src/sentry/interfaces/user.py\n--- a/src/sentry/interfaces/user.py\n+++ b/src/sentry/interfaces/user.py\n@@ -15,6 +15,19 @@\n from ipaddr import IPAddress\n \n \n+def validate_email(value, required=True):\n+ if not required and not value:\n+ return\n+\n+ if not isinstance(value, basestring):\n+ raise TypeError('object of type %r is not an email address' % type(value).__name__)\n+\n+ # safe to assume an email address at least has a @ in it.\n+ if '@' not in value:\n+ raise ValueError('malformed email address')\n+ return value\n+\n+\n def validate_ip(value, required=True):\n if not required and not value:\n return\n@@ -51,7 +64,7 @@\n \n kwargs = {\n 'id': trim(data.pop('id', None), 128),\n- 'email': trim(data.pop('email', None), 128),\n+ 'email': trim(validate_email(data.pop('email', None), False), 128),\n 'username': trim(data.pop('username', None), 128),\n 'ip_address': validate_ip(data.pop('ip_address', None), False),\n }\n", "issue": "Error processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'\nhttps://beta.getsentry.com/sentry/sentry/group/56738068/\n\n```\nAttributeError: 'int' object has no attribute 'lower'\n(11 additional frame(s) were not displayed)\n...\n File \"sentry/interfaces/user.py\", line 99, in to_email_html\n return render_to_string('sentry/partial/interfaces/user_email.html', context)\n File \"sentry/web/helpers.py\", line 122, in render_to_string\n return loader.render_to_string(template, context)\n File \"templatetag_sugar/node.py\", line 19, in render\n return self.function(context, *args, **kwargs)\n File \"sentry/templatetags/sentry_helpers.py\", line 314, in gravatar_url\n return get_gravatar_url(email, size, default)\n File \"sentry/utils/avatar.py\", line 18, in get_gravatar_url\n hashlib.md5(email.lower()).hexdigest())\n\nStacktrace (most recent call last):\n\nError processing 'rule_notify' on 'QuotaMailPlugin': 'int' object has no attribute 'lower'\n\nFile \"sentry/partial/interfaces/user_email.html\", line 7\n\n <tr>\n {% if user_email %}\n <td style=\"width:84px;padding-top:10px;vertical-align:top\">\n <img src=\"{% gravatar_url user_email size 64 %}\">\n </td>\n {% endif %}\n <td>\n```\n\n", "before_files": [{"content": "\"\"\"\nsentry.interfaces.user\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import\n\n__all__ = ('User',)\n\nfrom sentry.interfaces.base import Interface\nfrom sentry.utils.safe import trim, trim_dict\nfrom sentry.web.helpers import render_to_string\nfrom ipaddr import IPAddress\n\n\ndef validate_ip(value, required=True):\n if not required and not value:\n return\n\n # will raise a ValueError\n IPAddress(value)\n return value\n\n\nclass User(Interface):\n \"\"\"\n An interface which describes the authenticated User for a request.\n\n You should provide **at least** either an `id` (a unique identifier for\n an authenticated user) or `ip_address` (their IP address).\n\n All other attributes are optional.\n\n >>> {\n >>> \"id\": \"unique_id\",\n >>> \"username\": \"my_user\",\n >>> \"email\": \"[email protected]\"\n >>> \"ip_address\": \"127.0.0.1\",\n >>> \"optional\": \"value\"\n >>> }\n \"\"\"\n @classmethod\n def to_python(cls, data):\n data = data.copy()\n\n extra_data = data.pop('data', data)\n if not isinstance(extra_data, dict):\n extra_data = {}\n\n kwargs = {\n 'id': trim(data.pop('id', None), 
128),\n 'email': trim(data.pop('email', None), 128),\n 'username': trim(data.pop('username', None), 128),\n 'ip_address': validate_ip(data.pop('ip_address', None), False),\n }\n\n kwargs['data'] = trim_dict(extra_data)\n return cls(**kwargs)\n\n def get_api_context(self, is_public=False):\n return {\n 'id': self.id,\n 'email': self.email,\n 'username': self.username,\n 'ipAddress': self.ip_address,\n 'data': self.data,\n }\n\n def get_path(self):\n return 'sentry.interfaces.User'\n\n def get_hash(self):\n return []\n\n def to_email_html(self, event, **kwargs):\n context = {\n 'user_id': self.id,\n 'user_email': self.email,\n 'user_username': self.username,\n 'user_ip_address': self.ip_address,\n 'user_data': self.data,\n }\n return render_to_string('sentry/partial/interfaces/user_email.html', context)\n", "path": "src/sentry/interfaces/user.py"}]}
| 1,609 | 284 |
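Aside on the sentry example above: the crash comes from a non-string `email` (an `int` in the report) surviving all the way to `get_gravatar_url`, where `email.lower()` fails; the golden diff rejects such values at interface-validation time instead. Below is a stand-alone rendering of that guard, ported to Python 3 syntax (`str` in place of the original `basestring`), with invented sample values:

```python
def validate_email(value, required=True):
    if not required and not value:
        return None
    if not isinstance(value, str):
        raise TypeError('object of type %r is not an email address'
                        % type(value).__name__)
    # safe to assume an email address at least has a @ in it
    if '@' not in value:
        raise ValueError('malformed email address')
    return value

for candidate in ('[email protected]', 1234, 'no-at-sign', None):
    try:
        print(candidate, '->', validate_email(candidate, required=False))
    except (TypeError, ValueError) as exc:
        print(candidate, '-> rejected:', exc)
```

With this in place, `to_python` stores a trimmed, validated string (or nothing at all), so the email template never sees an integer.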
gh_patches_debug_16562
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-4047
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] change in 1.12.0 causes S3 SSLError
Thank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) for additional information about bug reports. For help with debugging your code, please refer to [Stack Overflow](https://stackoverflow.com/questions/tagged/mlflow).
**Please fill in this bug report template to ensure a timely and thorough response.**
### Willingness to contribute
The MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?
- [ ] Yes. I can contribute a fix for this bug independently.
- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.
- [ x] No. I cannot contribute a bug fix at this time.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**:
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS 10.15.6
- **MLflow installed from (source or binary)**: binary
- **MLflow version (run ``mlflow --version``)**: 1.12.0
- **Python version**: 3.8.5
- **npm version, if running the dev UI**:
- **Exact command to reproduce**:
S3ArtifactRepository(my_uri).list_artifacts(my_path)
### Describe the problem
Describe the problem clearly here. Include descriptions of the expected behavior and the actual behavior.
As of this version I'm getting an error:
"certificate verify failed: self signed certificate in certificate chain."
I'm working with private corporate endpoints. This worked fine before 1.12.0. I've isolated it to changes in mlflow.store.artifact.s3_artifact_repo, line 54. Simply adding verify=True explicitly seems to have caused this, even though this should be the default in boto3.
### Code to reproduce issue
Provide a reproducible test case that is the bare minimum necessary to generate the problem.
Tricky
### Other info / logs
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.
### What component(s), interfaces, languages, and integrations does this bug affect?
Components
- [x ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
</issue>
<code>
[start of mlflow/store/artifact/s3_artifact_repo.py]
1 import os
2 from mimetypes import guess_type
3
4 import posixpath
5 import urllib.parse
6
7 from mlflow import data
8 from mlflow.entities import FileInfo
9 from mlflow.exceptions import MlflowException
10 from mlflow.store.artifact.artifact_repo import ArtifactRepository
11 from mlflow.utils.file_utils import relative_path_to_artifact_path
12
13
14 class S3ArtifactRepository(ArtifactRepository):
15 """Stores artifacts on Amazon S3."""
16
17 @staticmethod
18 def parse_s3_uri(uri):
19 """Parse an S3 URI, returning (bucket, path)"""
20 parsed = urllib.parse.urlparse(uri)
21 if parsed.scheme != "s3":
22 raise Exception("Not an S3 URI: %s" % uri)
23 path = parsed.path
24 if path.startswith("/"):
25 path = path[1:]
26 return parsed.netloc, path
27
28 @staticmethod
29 def get_s3_file_upload_extra_args():
30 import json
31
32 s3_file_upload_extra_args = os.environ.get("MLFLOW_S3_UPLOAD_EXTRA_ARGS")
33 if s3_file_upload_extra_args:
34 return json.loads(s3_file_upload_extra_args)
35 else:
36 return None
37
38 def _get_s3_client(self):
39 import boto3
40 from botocore.client import Config
41
42 s3_endpoint_url = os.environ.get("MLFLOW_S3_ENDPOINT_URL")
43 ignore_tls = os.environ.get("MLFLOW_S3_IGNORE_TLS")
44
45 verify = True
46 if ignore_tls:
47 verify = ignore_tls.lower() not in ["true", "yes", "1"]
48
49 # NOTE: If you need to specify this env variable, please file an issue at
50 # https://github.com/mlflow/mlflow/issues so we know your use-case!
51 signature_version = os.environ.get("MLFLOW_EXPERIMENTAL_S3_SIGNATURE_VERSION", "s3v4")
52 return boto3.client(
53 "s3",
54 config=Config(signature_version=signature_version),
55 endpoint_url=s3_endpoint_url,
56 verify=verify,
57 )
58
59 def _upload_file(self, s3_client, local_file, bucket, key):
60 extra_args = dict()
61 guessed_type, guessed_encoding = guess_type(local_file)
62 if guessed_type is not None:
63 extra_args["ContentType"] = guessed_type
64 if guessed_encoding is not None:
65 extra_args["ContentEncoding"] = guessed_encoding
66 environ_extra_args = self.get_s3_file_upload_extra_args()
67 if environ_extra_args is not None:
68 extra_args.update(environ_extra_args)
69 s3_client.upload_file(Filename=local_file, Bucket=bucket, Key=key, ExtraArgs=extra_args)
70
71 def log_artifact(self, local_file, artifact_path=None):
72 (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri)
73 if artifact_path:
74 dest_path = posixpath.join(dest_path, artifact_path)
75 dest_path = posixpath.join(dest_path, os.path.basename(local_file))
76 self._upload_file(
77 s3_client=self._get_s3_client(), local_file=local_file, bucket=bucket, key=dest_path
78 )
79
80 def log_artifacts(self, local_dir, artifact_path=None):
81 (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri)
82 if artifact_path:
83 dest_path = posixpath.join(dest_path, artifact_path)
84 s3_client = self._get_s3_client()
85 local_dir = os.path.abspath(local_dir)
86 for (root, _, filenames) in os.walk(local_dir):
87 upload_path = dest_path
88 if root != local_dir:
89 rel_path = os.path.relpath(root, local_dir)
90 rel_path = relative_path_to_artifact_path(rel_path)
91 upload_path = posixpath.join(dest_path, rel_path)
92 for f in filenames:
93 self._upload_file(
94 s3_client=s3_client,
95 local_file=os.path.join(root, f),
96 bucket=bucket,
97 key=posixpath.join(upload_path, f),
98 )
99
100 def list_artifacts(self, path=None):
101 (bucket, artifact_path) = data.parse_s3_uri(self.artifact_uri)
102 dest_path = artifact_path
103 if path:
104 dest_path = posixpath.join(dest_path, path)
105 infos = []
106 prefix = dest_path + "/" if dest_path else ""
107 s3_client = self._get_s3_client()
108 paginator = s3_client.get_paginator("list_objects_v2")
109 results = paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter="/")
110 for result in results:
111 # Subdirectories will be listed as "common prefixes" due to the way we made the request
112 for obj in result.get("CommonPrefixes", []):
113 subdir_path = obj.get("Prefix")
114 self._verify_listed_object_contains_artifact_path_prefix(
115 listed_object_path=subdir_path, artifact_path=artifact_path
116 )
117 subdir_rel_path = posixpath.relpath(path=subdir_path, start=artifact_path)
118 if subdir_rel_path.endswith("/"):
119 subdir_rel_path = subdir_rel_path[:-1]
120 infos.append(FileInfo(subdir_rel_path, True, None))
121 # Objects listed directly will be files
122 for obj in result.get("Contents", []):
123 file_path = obj.get("Key")
124 self._verify_listed_object_contains_artifact_path_prefix(
125 listed_object_path=file_path, artifact_path=artifact_path
126 )
127 file_rel_path = posixpath.relpath(path=file_path, start=artifact_path)
128 file_size = int(obj.get("Size"))
129 infos.append(FileInfo(file_rel_path, False, file_size))
130 return sorted(infos, key=lambda f: f.path)
131
132 @staticmethod
133 def _verify_listed_object_contains_artifact_path_prefix(listed_object_path, artifact_path):
134 if not listed_object_path.startswith(artifact_path):
135 raise MlflowException(
136 "The path of the listed S3 object does not begin with the specified"
137 " artifact path. Artifact path: {artifact_path}. Object path:"
138 " {object_path}.".format(
139 artifact_path=artifact_path, object_path=listed_object_path
140 )
141 )
142
143 def _download_file(self, remote_file_path, local_path):
144 (bucket, s3_root_path) = data.parse_s3_uri(self.artifact_uri)
145 s3_full_path = posixpath.join(s3_root_path, remote_file_path)
146 s3_client = self._get_s3_client()
147 s3_client.download_file(bucket, s3_full_path, local_path)
148
149 def delete_artifacts(self, artifact_path=None):
150 (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri)
151 if artifact_path:
152 dest_path = posixpath.join(dest_path, artifact_path)
153
154 s3_client = self._get_s3_client()
155 list_objects = s3_client.list_objects(Bucket=bucket, Prefix=dest_path).get("Contents", [])
156 for to_delete_obj in list_objects:
157 file_path = to_delete_obj.get("Key")
158 self._verify_listed_object_contains_artifact_path_prefix(
159 listed_object_path=file_path, artifact_path=dest_path
160 )
161 s3_client.delete_object(Bucket=bucket, Key=file_path)
162
[end of mlflow/store/artifact/s3_artifact_repo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlflow/store/artifact/s3_artifact_repo.py b/mlflow/store/artifact/s3_artifact_repo.py
--- a/mlflow/store/artifact/s3_artifact_repo.py
+++ b/mlflow/store/artifact/s3_artifact_repo.py
@@ -42,9 +42,14 @@
s3_endpoint_url = os.environ.get("MLFLOW_S3_ENDPOINT_URL")
ignore_tls = os.environ.get("MLFLOW_S3_IGNORE_TLS")
- verify = True
+ do_verify = True
if ignore_tls:
- verify = ignore_tls.lower() not in ["true", "yes", "1"]
+ do_verify = ignore_tls.lower() not in ["true", "yes", "1"]
+
+ # The valid verify argument value is None/False/path to cert bundle file, See
+ # https://github.com/boto/boto3/blob/73865126cad3938ca80a2f567a1c79cb248169a7/
+ # boto3/session.py#L212
+ verify = None if do_verify else False
# NOTE: If you need to specify this env variable, please file an issue at
# https://github.com/mlflow/mlflow/issues so we know your use-case!
|
{"golden_diff": "diff --git a/mlflow/store/artifact/s3_artifact_repo.py b/mlflow/store/artifact/s3_artifact_repo.py\n--- a/mlflow/store/artifact/s3_artifact_repo.py\n+++ b/mlflow/store/artifact/s3_artifact_repo.py\n@@ -42,9 +42,14 @@\n s3_endpoint_url = os.environ.get(\"MLFLOW_S3_ENDPOINT_URL\")\n ignore_tls = os.environ.get(\"MLFLOW_S3_IGNORE_TLS\")\n \n- verify = True\n+ do_verify = True\n if ignore_tls:\n- verify = ignore_tls.lower() not in [\"true\", \"yes\", \"1\"]\n+ do_verify = ignore_tls.lower() not in [\"true\", \"yes\", \"1\"]\n+\n+ # The valid verify argument value is None/False/path to cert bundle file, See\n+ # https://github.com/boto/boto3/blob/73865126cad3938ca80a2f567a1c79cb248169a7/\n+ # boto3/session.py#L212\n+ verify = None if do_verify else False\n \n # NOTE: If you need to specify this env variable, please file an issue at\n # https://github.com/mlflow/mlflow/issues so we know your use-case!\n", "issue": "[BUG] change in 1.12.0 causes S3 SSLError\nThank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) for additional information about bug reports. For help with debugging your code, please refer to [Stack Overflow](https://stackoverflow.com/questions/tagged/mlflow).\r\n\r\n**Please fill in this bug report template to ensure a timely and thorough response.**\r\n\r\n### Willingness to contribute\r\nThe MLflow Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the MLflow code base?\r\n\r\n- [ ] Yes. I can contribute a fix for this bug independently.\r\n- [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the MLflow community.\r\n- [ x] No. I cannot contribute a bug fix at this time.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**:\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: macOS 10.15.6\r\n- **MLflow installed from (source or binary)**: binary\r\n- **MLflow version (run ``mlflow --version``)**: 1.12.0\r\n- **Python version**: 3.8.5\r\n- **npm version, if running the dev UI**:\r\n- **Exact command to reproduce**:\r\nS3ArtifactRepository(my_uri).list_artifacts(my_path)\r\n\r\n### Describe the problem\r\nDescribe the problem clearly here. Include descriptions of the expected behavior and the actual behavior.\r\n\r\nAs of this version I'm getting an error:\r\n\"certificate verify failed: self signed certificate in certificate chain.\" \r\n\r\nI'm working with private corporate endpoints. This worked fine before 1.12.0. I've isolated it to changes in mlflow.store.artifact.s3_artifact_repo, line 54. Simply adding verify=True explicitly seems to have caused this, even though this should be the default in boto3. \r\n\r\n### Code to reproduce issue\r\nProvide a reproducible test case that is the bare minimum necessary to generate the problem.\r\n\r\nTricky \r\n\r\n### Other info / logs\r\nInclude any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. 
Large logs and files should be attached.\r\n\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [x ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\n", "before_files": [{"content": "import os\nfrom mimetypes import guess_type\n\nimport posixpath\nimport urllib.parse\n\nfrom mlflow import data\nfrom mlflow.entities import FileInfo\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import relative_path_to_artifact_path\n\n\nclass S3ArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts on Amazon S3.\"\"\"\n\n @staticmethod\n def parse_s3_uri(uri):\n \"\"\"Parse an S3 URI, returning (bucket, path)\"\"\"\n parsed = urllib.parse.urlparse(uri)\n if parsed.scheme != \"s3\":\n raise Exception(\"Not an S3 URI: %s\" % uri)\n path = parsed.path\n if path.startswith(\"/\"):\n path = path[1:]\n return parsed.netloc, path\n\n @staticmethod\n def get_s3_file_upload_extra_args():\n import json\n\n s3_file_upload_extra_args = os.environ.get(\"MLFLOW_S3_UPLOAD_EXTRA_ARGS\")\n if s3_file_upload_extra_args:\n return json.loads(s3_file_upload_extra_args)\n else:\n return None\n\n def _get_s3_client(self):\n import boto3\n from botocore.client import Config\n\n s3_endpoint_url = os.environ.get(\"MLFLOW_S3_ENDPOINT_URL\")\n ignore_tls = os.environ.get(\"MLFLOW_S3_IGNORE_TLS\")\n\n verify = True\n if ignore_tls:\n verify = ignore_tls.lower() not in [\"true\", \"yes\", \"1\"]\n\n # NOTE: If you need to specify this env variable, please file an issue at\n # https://github.com/mlflow/mlflow/issues so we know your use-case!\n signature_version = os.environ.get(\"MLFLOW_EXPERIMENTAL_S3_SIGNATURE_VERSION\", \"s3v4\")\n return boto3.client(\n \"s3\",\n config=Config(signature_version=signature_version),\n endpoint_url=s3_endpoint_url,\n verify=verify,\n )\n\n def _upload_file(self, s3_client, local_file, bucket, key):\n extra_args = dict()\n guessed_type, guessed_encoding = guess_type(local_file)\n if guessed_type is not None:\n extra_args[\"ContentType\"] = guessed_type\n if guessed_encoding is not None:\n extra_args[\"ContentEncoding\"] = 
guessed_encoding\n environ_extra_args = self.get_s3_file_upload_extra_args()\n if environ_extra_args is not None:\n extra_args.update(environ_extra_args)\n s3_client.upload_file(Filename=local_file, Bucket=bucket, Key=key, ExtraArgs=extra_args)\n\n def log_artifact(self, local_file, artifact_path=None):\n (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n dest_path = posixpath.join(dest_path, os.path.basename(local_file))\n self._upload_file(\n s3_client=self._get_s3_client(), local_file=local_file, bucket=bucket, key=dest_path\n )\n\n def log_artifacts(self, local_dir, artifact_path=None):\n (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n s3_client = self._get_s3_client()\n local_dir = os.path.abspath(local_dir)\n for (root, _, filenames) in os.walk(local_dir):\n upload_path = dest_path\n if root != local_dir:\n rel_path = os.path.relpath(root, local_dir)\n rel_path = relative_path_to_artifact_path(rel_path)\n upload_path = posixpath.join(dest_path, rel_path)\n for f in filenames:\n self._upload_file(\n s3_client=s3_client,\n local_file=os.path.join(root, f),\n bucket=bucket,\n key=posixpath.join(upload_path, f),\n )\n\n def list_artifacts(self, path=None):\n (bucket, artifact_path) = data.parse_s3_uri(self.artifact_uri)\n dest_path = artifact_path\n if path:\n dest_path = posixpath.join(dest_path, path)\n infos = []\n prefix = dest_path + \"/\" if dest_path else \"\"\n s3_client = self._get_s3_client()\n paginator = s3_client.get_paginator(\"list_objects_v2\")\n results = paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter=\"/\")\n for result in results:\n # Subdirectories will be listed as \"common prefixes\" due to the way we made the request\n for obj in result.get(\"CommonPrefixes\", []):\n subdir_path = obj.get(\"Prefix\")\n self._verify_listed_object_contains_artifact_path_prefix(\n listed_object_path=subdir_path, artifact_path=artifact_path\n )\n subdir_rel_path = posixpath.relpath(path=subdir_path, start=artifact_path)\n if subdir_rel_path.endswith(\"/\"):\n subdir_rel_path = subdir_rel_path[:-1]\n infos.append(FileInfo(subdir_rel_path, True, None))\n # Objects listed directly will be files\n for obj in result.get(\"Contents\", []):\n file_path = obj.get(\"Key\")\n self._verify_listed_object_contains_artifact_path_prefix(\n listed_object_path=file_path, artifact_path=artifact_path\n )\n file_rel_path = posixpath.relpath(path=file_path, start=artifact_path)\n file_size = int(obj.get(\"Size\"))\n infos.append(FileInfo(file_rel_path, False, file_size))\n return sorted(infos, key=lambda f: f.path)\n\n @staticmethod\n def _verify_listed_object_contains_artifact_path_prefix(listed_object_path, artifact_path):\n if not listed_object_path.startswith(artifact_path):\n raise MlflowException(\n \"The path of the listed S3 object does not begin with the specified\"\n \" artifact path. Artifact path: {artifact_path}. 
Object path:\"\n \" {object_path}.\".format(\n artifact_path=artifact_path, object_path=listed_object_path\n )\n )\n\n def _download_file(self, remote_file_path, local_path):\n (bucket, s3_root_path) = data.parse_s3_uri(self.artifact_uri)\n s3_full_path = posixpath.join(s3_root_path, remote_file_path)\n s3_client = self._get_s3_client()\n s3_client.download_file(bucket, s3_full_path, local_path)\n\n def delete_artifacts(self, artifact_path=None):\n (bucket, dest_path) = data.parse_s3_uri(self.artifact_uri)\n if artifact_path:\n dest_path = posixpath.join(dest_path, artifact_path)\n\n s3_client = self._get_s3_client()\n list_objects = s3_client.list_objects(Bucket=bucket, Prefix=dest_path).get(\"Contents\", [])\n for to_delete_obj in list_objects:\n file_path = to_delete_obj.get(\"Key\")\n self._verify_listed_object_contains_artifact_path_prefix(\n listed_object_path=file_path, artifact_path=dest_path\n )\n s3_client.delete_object(Bucket=bucket, Key=file_path)\n", "path": "mlflow/store/artifact/s3_artifact_repo.py"}]}
| 3,328 | 292 |
gh_patches_debug_35495
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-9075
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use bibtex in eegbci.py
convert references in `mne/datasets/eegbci/eegbci.py` to use footcite / footbibliography
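For context, a short sketch of the target Sphinx style. The citation keys are assumptions built from the authors and years of the references already present in the module's docstrings, and they would need matching entries in the project's central BibTeX file:

```python
def data_path(url, path=None, force_update=False, update_path=None, verbose=None):
    """Get path to local copy of EEGMMI dataset URL.

    This is a low-level function useful for getting a local copy of a
    remote EEGBCI dataset :footcite:`SchalkEtAl2004`, which is available
    at PhysioNet :footcite:`GoldbergerEtAl2000`.

    References
    ----------
    .. footbibliography::
    """
```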
</issue>
<code>
[start of mne/datasets/eegbci/eegbci.py]
1 # Author: Martin Billinger <[email protected]>
2 # License: BSD Style.
3
4 import os
5 from os import path as op
6
7 from ..utils import _get_path, _do_path_update
8 from ...utils import _fetch_file, _url_to_local_path, verbose
9
10
11 EEGMI_URL = 'https://physionet.org/files/eegmmidb/1.0.0/'
12
13
14 @verbose
15 def data_path(url, path=None, force_update=False, update_path=None,
16 verbose=None):
17 """Get path to local copy of EEGMMI dataset URL.
18
19 This is a low-level function useful for getting a local copy of a
20 remote EEGBCI dataset [1]_ which is available at PhysioNet [2]_.
21
22 Parameters
23 ----------
24 url : str
25 The dataset to use.
26 path : None | str
27 Location of where to look for the EEGBCI data storing location.
28 If None, the environment variable or config parameter
29 ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the
30 "~/mne_data" directory is used. If the EEGBCI dataset
31 is not found under the given path, the data
32 will be automatically downloaded to the specified folder.
33 force_update : bool
34 Force update of the dataset even if a local copy exists.
35 update_path : bool | None
36 If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
37 config to the given path. If None, the user is prompted.
38 %(verbose)s
39
40 Returns
41 -------
42 path : list of str
43 Local path to the given data file. This path is contained inside a list
44 of length one, for compatibility.
45
46 Notes
47 -----
48 For example, one could do:
49
50 >>> from mne.datasets import eegbci
51 >>> url = 'http://www.physionet.org/physiobank/database/eegmmidb/'
52 >>> eegbci.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP
53
54 This would download the given EEGBCI data file to the 'datasets' folder,
55 and prompt the user to save the 'datasets' path to the mne-python config,
56 if it isn't there already.
57
58 References
59 ----------
60 .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
61 Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer
62 Interface (BCI) System. IEEE TBME 51(6):1034-1043
63 .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
64 Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
65 PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
66 Research Resource for Complex Physiologic Signals.
67 Circulation 101(23):e215-e220
68 """ # noqa: E501
69 key = 'MNE_DATASETS_EEGBCI_PATH'
70 name = 'EEGBCI'
71 path = _get_path(path, key, name)
72 destination = _url_to_local_path(url, op.join(path, 'MNE-eegbci-data'))
73 destinations = [destination]
74
75 # Fetch the file
76 if not op.isfile(destination) or force_update:
77 if op.isfile(destination):
78 os.remove(destination)
79 if not op.isdir(op.dirname(destination)):
80 os.makedirs(op.dirname(destination))
81 _fetch_file(url, destination, print_destination=False)
82
83 # Offer to update the path
84 _do_path_update(path, update_path, key, name)
85 return destinations
86
87
88 @verbose
89 def load_data(subject, runs, path=None, force_update=False, update_path=None,
90 base_url=EEGMI_URL, verbose=None): # noqa: D301
91 """Get paths to local copies of EEGBCI dataset files.
92
93 This will fetch data for the EEGBCI dataset [1]_, which is also
94 available at PhysioNet [2]_.
95
96 Parameters
97 ----------
98 subject : int
99 The subject to use. Can be in the range of 1-109 (inclusive).
100 runs : int | list of int
101 The runs to use. See Notes for details.
102 path : None | str
103 Location of where to look for the EEGBCI data storing location.
104 If None, the environment variable or config parameter
105 ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the
106 "~/mne_data" directory is used. If the EEGBCI dataset
107 is not found under the given path, the data
108 will be automatically downloaded to the specified folder.
109 force_update : bool
110 Force update of the dataset even if a local copy exists.
111 update_path : bool | None
112 If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
113 config to the given path. If None, the user is prompted.
114 base_url : str
115 The URL root for the data.
116 %(verbose)s
117
118 Returns
119 -------
120 paths : list
121 List of local data paths of the given type.
122
123 Notes
124 -----
125 The run numbers correspond to:
126
127 ========= ===================================
128 run task
129 ========= ===================================
130 1 Baseline, eyes open
131 2 Baseline, eyes closed
132 3, 7, 11 Motor execution: left vs right hand
133 4, 8, 12 Motor imagery: left vs right hand
134 5, 9, 13 Motor execution: hands vs feet
135 6, 10, 14 Motor imagery: hands vs feet
136 ========= ===================================
137
138 For example, one could do::
139
140 >>> from mne.datasets import eegbci
141 >>> eegbci.load_data(1, [4, 10, 14], os.getenv('HOME') + '/datasets') # doctest:+SKIP
142
143 This would download runs 4, 10, and 14 (hand/foot motor imagery) runs from
144 subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the
145 user to save the 'datasets' path to the mne-python config, if it isn't
146 there already.
147
148 References
149 ----------
150 .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
151 Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer
152 Interface (BCI) System. IEEE TBME 51(6):1034-1043
153 .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
154 Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
155 PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
156 Research Resource for Complex Physiologic Signals.
157 Circulation 101(23):e215-e220
158 """ # noqa: E501
159 if not hasattr(runs, '__iter__'):
160 runs = [runs]
161
162 data_paths = []
163 for r in runs:
164 url = '{u}S{s:03d}/S{s:03d}R{r:02d}.edf'.format(u=base_url,
165 s=subject, r=r)
166 data_paths.extend(data_path(url, path, force_update, update_path))
167
168 return data_paths
169
170
171 def standardize(raw):
172 """Standardize channel positions and names.
173
174 Parameters
175 ----------
176 raw : instance of Raw
177 The raw data to standardize. Operates in-place.
178 """
179 rename = dict()
180 for name in raw.ch_names:
181 std_name = name.strip('.')
182 std_name = std_name.upper()
183 if std_name.endswith('Z'):
184 std_name = std_name[:-1] + 'z'
185 if std_name.startswith('FP'):
186 std_name = 'Fp' + std_name[2:]
187 rename[name] = std_name
188 raw.rename_channels(rename)
189
[end of mne/datasets/eegbci/eegbci.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py
--- a/mne/datasets/eegbci/eegbci.py
+++ b/mne/datasets/eegbci/eegbci.py
@@ -17,7 +17,7 @@
"""Get path to local copy of EEGMMI dataset URL.
This is a low-level function useful for getting a local copy of a
- remote EEGBCI dataset [1]_ which is available at PhysioNet [2]_.
+ remote EEGBCI dataset :footcite:`SchalkEtAl2004` which is available at PhysioNet :footcite:`GoldbergerEtAl2000`.
Parameters
----------
@@ -57,14 +57,7 @@
References
----------
- .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
- Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer
- Interface (BCI) System. IEEE TBME 51(6):1034-1043
- .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
- Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
- PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
- Research Resource for Complex Physiologic Signals.
- Circulation 101(23):e215-e220
+ .. footbibliography::
""" # noqa: E501
key = 'MNE_DATASETS_EEGBCI_PATH'
name = 'EEGBCI'
@@ -90,8 +83,8 @@
base_url=EEGMI_URL, verbose=None): # noqa: D301
"""Get paths to local copies of EEGBCI dataset files.
- This will fetch data for the EEGBCI dataset [1]_, which is also
- available at PhysioNet [2]_.
+ This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is also
+ available at PhysioNet :footcite:`GoldbergerEtAl2000`.
Parameters
----------
@@ -147,14 +140,7 @@
References
----------
- .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,
- Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer
- Interface (BCI) System. IEEE TBME 51(6):1034-1043
- .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
- Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
- PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
- Research Resource for Complex Physiologic Signals.
- Circulation 101(23):e215-e220
+ .. footbibliography::
""" # noqa: E501
if not hasattr(runs, '__iter__'):
runs = [runs]
|
{"golden_diff": "diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py\n--- a/mne/datasets/eegbci/eegbci.py\n+++ b/mne/datasets/eegbci/eegbci.py\n@@ -17,7 +17,7 @@\n \"\"\"Get path to local copy of EEGMMI dataset URL.\n \n This is a low-level function useful for getting a local copy of a\n- remote EEGBCI dataset [1]_ which is available at PhysioNet [2]_.\n+ remote EEGBCI dataset :footcite:`SchalkEtAl2004` which is available at PhysioNet :footcite:`GoldbergerEtAl2000`.\n \n Parameters\n ----------\n@@ -57,14 +57,7 @@\n \n References\n ----------\n- .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,\n- Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer\n- Interface (BCI) System. IEEE TBME 51(6):1034-1043\n- .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,\n- Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)\n- PhysioBank, PhysioToolkit, and PhysioNet: Components of a New\n- Research Resource for Complex Physiologic Signals.\n- Circulation 101(23):e215-e220\n+ .. footbibliography::\n \"\"\" # noqa: E501\n key = 'MNE_DATASETS_EEGBCI_PATH'\n name = 'EEGBCI'\n@@ -90,8 +83,8 @@\n base_url=EEGMI_URL, verbose=None): # noqa: D301\n \"\"\"Get paths to local copies of EEGBCI dataset files.\n \n- This will fetch data for the EEGBCI dataset [1]_, which is also\n- available at PhysioNet [2]_.\n+ This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is also\n+ available at PhysioNet :footcite:`GoldbergerEtAl2000`.\n \n Parameters\n ----------\n@@ -147,14 +140,7 @@\n \n References\n ----------\n- .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,\n- Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer\n- Interface (BCI) System. IEEE TBME 51(6):1034-1043\n- .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,\n- Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)\n- PhysioBank, PhysioToolkit, and PhysioNet: Components of a New\n- Research Resource for Complex Physiologic Signals.\n- Circulation 101(23):e215-e220\n+ .. footbibliography::\n \"\"\" # noqa: E501\n if not hasattr(runs, '__iter__'):\n runs = [runs]\n", "issue": "use bibtex in eegbci.py\nconvert references in `mne/datasets/eegbci/eegbci.py` to use footcite / footbibliography\r\n\n", "before_files": [{"content": "# Author: Martin Billinger <[email protected]>\n# License: BSD Style.\n\nimport os\nfrom os import path as op\n\nfrom ..utils import _get_path, _do_path_update\nfrom ...utils import _fetch_file, _url_to_local_path, verbose\n\n\nEEGMI_URL = 'https://physionet.org/files/eegmmidb/1.0.0/'\n\n\n@verbose\ndef data_path(url, path=None, force_update=False, update_path=None,\n verbose=None):\n \"\"\"Get path to local copy of EEGMMI dataset URL.\n\n This is a low-level function useful for getting a local copy of a\n remote EEGBCI dataset [1]_ which is available at PhysioNet [2]_.\n\n Parameters\n ----------\n url : str\n The dataset to use.\n path : None | str\n Location of where to look for the EEGBCI data storing location.\n If None, the environment variable or config parameter\n ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the\n \"~/mne_data\" directory is used. If the EEGBCI dataset\n is not found under the given path, the data\n will be automatically downloaded to the specified folder.\n force_update : bool\n Force update of the dataset even if a local copy exists.\n update_path : bool | None\n If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python\n config to the given path. 
If None, the user is prompted.\n %(verbose)s\n\n Returns\n -------\n path : list of str\n Local path to the given data file. This path is contained inside a list\n of length one, for compatibility.\n\n Notes\n -----\n For example, one could do:\n\n >>> from mne.datasets import eegbci\n >>> url = 'http://www.physionet.org/physiobank/database/eegmmidb/'\n >>> eegbci.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP\n\n This would download the given EEGBCI data file to the 'datasets' folder,\n and prompt the user to save the 'datasets' path to the mne-python config,\n if it isn't there already.\n\n References\n ----------\n .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,\n Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer\n Interface (BCI) System. IEEE TBME 51(6):1034-1043\n .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,\n Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)\n PhysioBank, PhysioToolkit, and PhysioNet: Components of a New\n Research Resource for Complex Physiologic Signals.\n Circulation 101(23):e215-e220\n \"\"\" # noqa: E501\n key = 'MNE_DATASETS_EEGBCI_PATH'\n name = 'EEGBCI'\n path = _get_path(path, key, name)\n destination = _url_to_local_path(url, op.join(path, 'MNE-eegbci-data'))\n destinations = [destination]\n\n # Fetch the file\n if not op.isfile(destination) or force_update:\n if op.isfile(destination):\n os.remove(destination)\n if not op.isdir(op.dirname(destination)):\n os.makedirs(op.dirname(destination))\n _fetch_file(url, destination, print_destination=False)\n\n # Offer to update the path\n _do_path_update(path, update_path, key, name)\n return destinations\n\n\n@verbose\ndef load_data(subject, runs, path=None, force_update=False, update_path=None,\n base_url=EEGMI_URL, verbose=None): # noqa: D301\n \"\"\"Get paths to local copies of EEGBCI dataset files.\n\n This will fetch data for the EEGBCI dataset [1]_, which is also\n available at PhysioNet [2]_.\n\n Parameters\n ----------\n subject : int\n The subject to use. Can be in the range of 1-109 (inclusive).\n runs : int | list of int\n The runs to use. See Notes for details.\n path : None | str\n Location of where to look for the EEGBCI data storing location.\n If None, the environment variable or config parameter\n ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the\n \"~/mne_data\" directory is used. If the EEGBCI dataset\n is not found under the given path, the data\n will be automatically downloaded to the specified folder.\n force_update : bool\n Force update of the dataset even if a local copy exists.\n update_path : bool | None\n If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python\n config to the given path. 
If None, the user is prompted.\n base_url : str\n The URL root for the data.\n %(verbose)s\n\n Returns\n -------\n paths : list\n List of local data paths of the given type.\n\n Notes\n -----\n The run numbers correspond to:\n\n ========= ===================================\n run task\n ========= ===================================\n 1 Baseline, eyes open\n 2 Baseline, eyes closed\n 3, 7, 11 Motor execution: left vs right hand\n 4, 8, 12 Motor imagery: left vs right hand\n 5, 9, 13 Motor execution: hands vs feet\n 6, 10, 14 Motor imagery: hands vs feet\n ========= ===================================\n\n For example, one could do::\n\n >>> from mne.datasets import eegbci\n >>> eegbci.load_data(1, [4, 10, 14], os.getenv('HOME') + '/datasets') # doctest:+SKIP\n\n This would download runs 4, 10, and 14 (hand/foot motor imagery) runs from\n subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the\n user to save the 'datasets' path to the mne-python config, if it isn't\n there already.\n\n References\n ----------\n .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N.,\n Wolpaw, J.R. (2004) BCI2000: A General-Purpose Brain-Computer\n Interface (BCI) System. IEEE TBME 51(6):1034-1043\n .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,\n Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)\n PhysioBank, PhysioToolkit, and PhysioNet: Components of a New\n Research Resource for Complex Physiologic Signals.\n Circulation 101(23):e215-e220\n \"\"\" # noqa: E501\n if not hasattr(runs, '__iter__'):\n runs = [runs]\n\n data_paths = []\n for r in runs:\n url = '{u}S{s:03d}/S{s:03d}R{r:02d}.edf'.format(u=base_url,\n s=subject, r=r)\n data_paths.extend(data_path(url, path, force_update, update_path))\n\n return data_paths\n\n\ndef standardize(raw):\n \"\"\"Standardize channel positions and names.\n\n Parameters\n ----------\n raw : instance of Raw\n The raw data to standardize. Operates in-place.\n \"\"\"\n rename = dict()\n for name in raw.ch_names:\n std_name = name.strip('.')\n std_name = std_name.upper()\n if std_name.endswith('Z'):\n std_name = std_name[:-1] + 'z'\n if std_name.startswith('FP'):\n std_name = 'Fp' + std_name[2:]\n rename[name] = std_name\n raw.rename_channels(rename)\n", "path": "mne/datasets/eegbci/eegbci.py"}]}
| 2,946 | 818 |
gh_patches_debug_32134
|
rasdani/github-patches
|
git_diff
|
GPflow__GPflow-1860
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug check_shapes with keras layers
Hi @jesnie,
I've been using the `check_shapes` decorator in eager mode for the last couple of days and I really love it!
I've encountered a small inconvenience when using it in combination with Keras layers. The following snippets show the weird behaviour:
1. Custom keras layer without `check_shapes` decorator runs:
```python
from gpflow.experimental.check_shapes import check_shapes
import tensorflow as tf
class MyModel(tf.keras.Model):
def call(self, x, y):
return x + y
layer = MyModel()
x = tf.random.normal((32, 3))
y = tf.random.normal((32, 1))
layer(x, y)
```
2. Adding `check_shapes` decorator breaks the code:
```python
from gpflow.experimental.check_shapes import check_shapes
import tensorflow as tf
class MyModel(tf.keras.Model):
@check_shapes(
"x: [batch, input_dim]",
"y: [batch, 1]",
"return: [batch, input_dim]",
)
def call(self, x, y):
return x + y
layer = MyModel()
x = tf.random.normal((32, 3))
y = tf.random.normal((32, 1))
layer(x, y)
```
as now the layer expects a `training` kwarg to be added (a likely cause is sketched after snippet 3 below). The following error is thrown:
```
TypeError: call() got an unexpected keyword argument 'training'
```
3. After adding a `training` kwarg, the code runs again. However, this isn't necessary when only using "native" Keras.
```python
from gpflow.experimental.check_shapes import check_shapes
import tensorflow as tf
class MyModel(tf.keras.Model):
@check_shapes(
"x: [batch, input_dim]",
"y: [batch, 1]",
"return: [batch, input_dim]",
)
def call(self, x, y, training = None):
return x + y
layer = MyModel()
x = tf.random.normal((32, 3))
y = tf.random.normal((32, 1))
layer(x, y)
```
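Regarding the `TypeError` in snippet 2: a plausible cause, stated as an assumption about Keras internals rather than a confirmed diagnosis, is that Keras inspects the argspec of `call` to decide whether to inject a `training` argument, and the decorator's `(*args, **kwargs)` wrapper hides the real signature. A minimal, framework-free sketch of the effect:

```python
import inspect
from functools import wraps

def call(self, x, y):
    return x + y

@wraps(call)
def wrapped(*args, **kwargs):
    return call(*args, **kwargs)

# inspect.signature follows __wrapped__, but getfullargspec does not, so any
# tool relying on the argspec sees a function that seems to take arbitrary kwargs.
print(inspect.signature(wrapped))            # (self, x, y)
print(inspect.getfullargspec(wrapped).args)  # []
print(inspect.getfullargspec(wrapped).varkw) # 'kwargs'
```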
I'm using the following versions with python 3.8.13:
```
gpflow 2.4.0
tensorflow 2.5.3
```
Many thanks,
Vincent
</issue>
<code>
[start of gpflow/experimental/check_shapes/decorator.py]
1 # Copyright 2022 The GPflow Contributors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Decorator for checking the shapes of function using tf Tensors.
16 """
17 import inspect
18 from functools import wraps
19 from typing import Any, Callable, Sequence, cast
20
21 from ..utils import experimental
22 from .accessors import set_check_shapes
23 from .argument_ref import RESULT_TOKEN
24 from .base_types import C
25 from .checker import ShapeChecker
26 from .checker_context import set_shape_checker
27 from .config import get_enable_check_shapes
28 from .error_contexts import (
29 ConditionContext,
30 FunctionCallContext,
31 FunctionDefinitionContext,
32 NoteContext,
33 ParallelContext,
34 StackContext,
35 )
36 from .parser import parse_and_rewrite_docstring, parse_function_spec
37 from .specs import ParsedArgumentSpec
38
39
40 def null_check_shapes(func: C) -> C:
41 """
42 Annotates the given function so that it looks like it has shape checks, but without actually
43 checking anything.
44
45 This is necessary not to break `@inherit_check_shapes` when shape checking is disabled.
46 """
47 set_check_shapes(func, null_check_shapes)
48 return func
49
50
51 @experimental
52 def check_shapes(*specs: str) -> Callable[[C], C]:
53 """
54 Decorator that checks the shapes of tensor arguments.
55
56 :param specs: Specification of arguments to check. See: `Check specification`_.
57 """
58 if not get_enable_check_shapes():
59 return null_check_shapes
60
61 unbound_error_context = FunctionCallContext(check_shapes)
62
63 func_spec = parse_function_spec(specs, unbound_error_context)
64
65 pre_specs = [spec for spec in func_spec.arguments if not spec.argument_ref.is_result]
66 post_specs = [spec for spec in func_spec.arguments if spec.argument_ref.is_result]
67 note_specs = func_spec.notes
68
69 def _check_shapes(func: C) -> C:
70 bound_error_context = FunctionDefinitionContext(func)
71 signature = inspect.signature(func)
72
73 @wraps(func)
74 def wrapped(*args: Any, **kwargs: Any) -> Any:
75 if not get_enable_check_shapes():
76 return func(*args, **kwargs)
77
78 try:
79 bound_arguments = signature.bind(*args, **kwargs)
80 except TypeError as e:
81 # TypeError is raised if *args and **kwargs don't actually match the arguments of
82 # `func`. In that case we just call `func` normally, which will also result in an
83 # error, but an error with the error message the user is used to.
84 func(*args, **kwargs)
85 raise AssertionError(
86 "The above line should fail so this line should never be reached."
87 ) from e
88 bound_arguments.apply_defaults()
89 arg_map = bound_arguments.arguments
90
91 checker = ShapeChecker()
92 for note_spec in note_specs:
93 checker.add_context(StackContext(bound_error_context, NoteContext(note_spec)))
94
95 def _check_specs(specs: Sequence[ParsedArgumentSpec]) -> None:
96 processed_specs = []
97
98 for arg_spec in specs:
99 for arg_value, relative_arg_context in arg_spec.argument_ref.get(
100 arg_map, bound_error_context
101 ):
102 arg_context = StackContext(bound_error_context, relative_arg_context)
103
104 if arg_spec.condition is not None:
105 condition, condition_context = arg_spec.condition.get(
106 arg_map,
107 StackContext(arg_context, ConditionContext(arg_spec.condition)),
108 )
109 if not condition:
110 continue
111 arg_context = StackContext(
112 bound_error_context,
113 ParallelContext(
114 (
115 StackContext(
116 relative_arg_context,
117 StackContext(
118 ConditionContext(arg_spec.condition),
119 condition_context,
120 ),
121 ),
122 )
123 ),
124 )
125
126 processed_specs.append((arg_value, arg_spec.tensor, arg_context))
127
128 checker.check_shapes(processed_specs)
129
130 _check_specs(pre_specs)
131
132 with set_shape_checker(checker):
133 result = func(*args, **kwargs)
134 arg_map[RESULT_TOKEN] = result
135
136 _check_specs(post_specs)
137
138 return result
139
140 set_check_shapes(wrapped, _check_shapes)
141 wrapped.__doc__ = parse_and_rewrite_docstring(
142 wrapped.__doc__, func_spec, bound_error_context
143 )
144 return cast(C, wrapped)
145
146 return _check_shapes
147
[end of gpflow/experimental/check_shapes/decorator.py]
[start of gpflow/experimental/check_shapes/exceptions.py]
1 # Copyright 2022 The GPflow Contributors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # pylint: disable=broad-except
16
17 """
18 Exceptions generated by `check_shapes`.
19
20 These rely heavily on the infrastructure in `error_contexts.py`.
21 """
22 from .error_contexts import ErrorContext, MessageBuilder
23
24
25 class CheckShapesError(Exception):
26 """
27 Common super class for `check_shapes` errors.
28 """
29
30 error_message: str
31
32 def __init__(self, context: ErrorContext) -> None:
33 builder = MessageBuilder()
34 builder.add_line("")
35 builder.add_line(self.error_message)
36 with builder.indent() as b:
37 context.print(b)
38 super().__init__(builder.build())
39
40 self.context = context
41
42
43 class VariableTypeError(CheckShapesError):
44 """
45 Error raised if a variable is used both as a rank-1 and a variable-rank variable.
46 """
47
48 error_message = (
49 "Cannot use the same variable to bind both a single dimension"
50 " and a variable number of dimensions."
51 )
52
53
54 class SpecificationParseError(CheckShapesError):
55 """
56 Error raised if there was an error parsing the shape specification.
57 """
58
59 error_message = "Unable to parse shape specification."
60
61
62 class DocstringParseError(CheckShapesError):
63 """
64 Error raised if there was an error parsing the shape specification.
65 """
66
67 error_message = "Unable to parse docstring."
68
69
70 class ArgumentReferenceError(CheckShapesError):
71 """
72 Error raised if the argument to check the shape of could not be resolved.
73 """
74
75 error_message = "Unable to resolve argument / missing argument."
76
77
78 class ShapeMismatchError(CheckShapesError):
79 """
80 Error raised if a function is called with tensors of the wrong shape.
81 """
82
83 error_message = "Tensor shape mismatch."
84
85
86 class NoShapeError(CheckShapesError):
87 """
88 Error raised if we are trying to get the shape of an object that does not have a shape.
89 """
90
91 error_message = "Unable to determine shape of object."
92
[end of gpflow/experimental/check_shapes/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gpflow/experimental/check_shapes/decorator.py b/gpflow/experimental/check_shapes/decorator.py
--- a/gpflow/experimental/check_shapes/decorator.py
+++ b/gpflow/experimental/check_shapes/decorator.py
@@ -15,9 +15,11 @@
Decorator for checking the shapes of function using tf Tensors.
"""
import inspect
-from functools import wraps
+from functools import update_wrapper
from typing import Any, Callable, Sequence, cast
+import tensorflow as tf
+
from ..utils import experimental
from .accessors import set_check_shapes
from .argument_ref import RESULT_TOKEN
@@ -70,7 +72,6 @@
bound_error_context = FunctionDefinitionContext(func)
signature = inspect.signature(func)
- @wraps(func)
def wrapped(*args: Any, **kwargs: Any) -> Any:
if not get_enable_check_shapes():
return func(*args, **kwargs)
@@ -137,10 +138,12 @@
return result
+ # Make TensorFlow understand our decoration:
+ tf.compat.v1.flags.tf_decorator.make_decorator(func, wrapped)
+
+ update_wrapper(wrapped, func)
set_check_shapes(wrapped, _check_shapes)
- wrapped.__doc__ = parse_and_rewrite_docstring(
- wrapped.__doc__, func_spec, bound_error_context
- )
+ wrapped.__doc__ = parse_and_rewrite_docstring(func.__doc__, func_spec, bound_error_context)
return cast(C, wrapped)
return _check_shapes
diff --git a/gpflow/experimental/check_shapes/exceptions.py b/gpflow/experimental/check_shapes/exceptions.py
--- a/gpflow/experimental/check_shapes/exceptions.py
+++ b/gpflow/experimental/check_shapes/exceptions.py
@@ -39,6 +39,9 @@
self.context = context
+ # Prevent Keras from rewriting our exception:
+ self._keras_call_info_injected = True
+
class VariableTypeError(CheckShapesError):
"""
|
{"golden_diff": "diff --git a/gpflow/experimental/check_shapes/decorator.py b/gpflow/experimental/check_shapes/decorator.py\n--- a/gpflow/experimental/check_shapes/decorator.py\n+++ b/gpflow/experimental/check_shapes/decorator.py\n@@ -15,9 +15,11 @@\n Decorator for checking the shapes of function using tf Tensors.\n \"\"\"\n import inspect\n-from functools import wraps\n+from functools import update_wrapper\n from typing import Any, Callable, Sequence, cast\n \n+import tensorflow as tf\n+\n from ..utils import experimental\n from .accessors import set_check_shapes\n from .argument_ref import RESULT_TOKEN\n@@ -70,7 +72,6 @@\n bound_error_context = FunctionDefinitionContext(func)\n signature = inspect.signature(func)\n \n- @wraps(func)\n def wrapped(*args: Any, **kwargs: Any) -> Any:\n if not get_enable_check_shapes():\n return func(*args, **kwargs)\n@@ -137,10 +138,12 @@\n \n return result\n \n+ # Make TensorFlow understand our decoration:\n+ tf.compat.v1.flags.tf_decorator.make_decorator(func, wrapped)\n+\n+ update_wrapper(wrapped, func)\n set_check_shapes(wrapped, _check_shapes)\n- wrapped.__doc__ = parse_and_rewrite_docstring(\n- wrapped.__doc__, func_spec, bound_error_context\n- )\n+ wrapped.__doc__ = parse_and_rewrite_docstring(func.__doc__, func_spec, bound_error_context)\n return cast(C, wrapped)\n \n return _check_shapes\ndiff --git a/gpflow/experimental/check_shapes/exceptions.py b/gpflow/experimental/check_shapes/exceptions.py\n--- a/gpflow/experimental/check_shapes/exceptions.py\n+++ b/gpflow/experimental/check_shapes/exceptions.py\n@@ -39,6 +39,9 @@\n \n self.context = context\n \n+ # Prevent Keras from rewriting our exception:\n+ self._keras_call_info_injected = True\n+\n \n class VariableTypeError(CheckShapesError):\n \"\"\"\n", "issue": "Bug check_shapes with keras layers\nHi @jesnie,\r\n\r\nI've been using the `check_shapes` decorator in eager the last couple of days and I really love it!\r\n\r\nI've encountered a small inconvenience when using it in combination with Keras layers. The following snippets show the weird behaviour:\r\n\r\n1. Custom keras layer without `check_shapes` decorator runs:\r\n```python\r\nfrom gpflow.experimental.check_shapes import check_shapes\r\nimport tensorflow as tf\r\n\r\nclass MyModel(tf.keras.Model):\r\n\r\n def call(self, x, y):\r\n return x + y\r\n\r\nlayer = MyModel()\r\nx = tf.random.normal((32, 3))\r\ny = tf.random.normal((32, 1))\r\nlayer(x, y)\r\n```\r\n\r\n2. Adding `check_shapes` decorator breaks the code:\r\n```python\r\nfrom gpflow.experimental.check_shapes import check_shapes\r\nimport tensorflow as tf\r\n\r\nclass MyModel(tf.keras.Model):\r\n\r\n @check_shapes(\r\n \"x: [batch, input_dim]\",\r\n \"y: [batch, 1]\",\r\n \"return: [batch, input_dim]\",\r\n )\r\n def call(self, x, y):\r\n return x + y\r\n\r\nlayer = MyModel()\r\nx = tf.random.normal((32, 3))\r\ny = tf.random.normal((32, 1))\r\nlayer(x, y)\r\n```\r\nas now the layer expects a `training` kwarg to be added. The following error is thrown:\r\n```\r\n[TypeError: call() got an unexpected keyword argument 'training']()\r\n```\r\n\r\n3. After adding a `training` kwarg the code runs again. 
However, this isn't necessary when only using \"native\" keras.\r\n```python\r\nfrom gpflow.experimental.check_shapes import check_shapes\r\nimport tensorflow as tf\r\n\r\nclass MyModel(tf.keras.Model):\r\n\r\n @check_shapes(\r\n \"x: [batch, input_dim]\",\r\n \"y: [batch, 1]\",\r\n \"return: [batch, input_dim]\",\r\n )\r\n def call(self, x, y, training = None):\r\n return x + y\r\n\r\nlayer = MyModel()\r\nx = tf.random.normal((32, 3))\r\ny = tf.random.normal((32, 1))\r\nlayer(x, y)\r\n```\r\n\r\nI'm using the following versions with python 3.8.13:\r\n```\r\ngpflow 2.4.0\r\ntensorflow 2.5.3\r\n```\r\n\r\nMany thanks,\r\nVincent\n", "before_files": [{"content": "# Copyright 2022 The GPflow Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDecorator for checking the shapes of function using tf Tensors.\n\"\"\"\nimport inspect\nfrom functools import wraps\nfrom typing import Any, Callable, Sequence, cast\n\nfrom ..utils import experimental\nfrom .accessors import set_check_shapes\nfrom .argument_ref import RESULT_TOKEN\nfrom .base_types import C\nfrom .checker import ShapeChecker\nfrom .checker_context import set_shape_checker\nfrom .config import get_enable_check_shapes\nfrom .error_contexts import (\n ConditionContext,\n FunctionCallContext,\n FunctionDefinitionContext,\n NoteContext,\n ParallelContext,\n StackContext,\n)\nfrom .parser import parse_and_rewrite_docstring, parse_function_spec\nfrom .specs import ParsedArgumentSpec\n\n\ndef null_check_shapes(func: C) -> C:\n \"\"\"\n Annotates the given function so that it looks like it has shape checks, but without actually\n checking anything.\n\n This is necessary not to break `@inherit_check_shapes` when shape checking is disabled.\n \"\"\"\n set_check_shapes(func, null_check_shapes)\n return func\n\n\n@experimental\ndef check_shapes(*specs: str) -> Callable[[C], C]:\n \"\"\"\n Decorator that checks the shapes of tensor arguments.\n\n :param specs: Specification of arguments to check. See: `Check specification`_.\n \"\"\"\n if not get_enable_check_shapes():\n return null_check_shapes\n\n unbound_error_context = FunctionCallContext(check_shapes)\n\n func_spec = parse_function_spec(specs, unbound_error_context)\n\n pre_specs = [spec for spec in func_spec.arguments if not spec.argument_ref.is_result]\n post_specs = [spec for spec in func_spec.arguments if spec.argument_ref.is_result]\n note_specs = func_spec.notes\n\n def _check_shapes(func: C) -> C:\n bound_error_context = FunctionDefinitionContext(func)\n signature = inspect.signature(func)\n\n @wraps(func)\n def wrapped(*args: Any, **kwargs: Any) -> Any:\n if not get_enable_check_shapes():\n return func(*args, **kwargs)\n\n try:\n bound_arguments = signature.bind(*args, **kwargs)\n except TypeError as e:\n # TypeError is raised if *args and **kwargs don't actually match the arguments of\n # `func`. 
In that case we just call `func` normally, which will also result in an\n # error, but an error with the error message the user is used to.\n func(*args, **kwargs)\n raise AssertionError(\n \"The above line should fail so this line should never be reached.\"\n ) from e\n bound_arguments.apply_defaults()\n arg_map = bound_arguments.arguments\n\n checker = ShapeChecker()\n for note_spec in note_specs:\n checker.add_context(StackContext(bound_error_context, NoteContext(note_spec)))\n\n def _check_specs(specs: Sequence[ParsedArgumentSpec]) -> None:\n processed_specs = []\n\n for arg_spec in specs:\n for arg_value, relative_arg_context in arg_spec.argument_ref.get(\n arg_map, bound_error_context\n ):\n arg_context = StackContext(bound_error_context, relative_arg_context)\n\n if arg_spec.condition is not None:\n condition, condition_context = arg_spec.condition.get(\n arg_map,\n StackContext(arg_context, ConditionContext(arg_spec.condition)),\n )\n if not condition:\n continue\n arg_context = StackContext(\n bound_error_context,\n ParallelContext(\n (\n StackContext(\n relative_arg_context,\n StackContext(\n ConditionContext(arg_spec.condition),\n condition_context,\n ),\n ),\n )\n ),\n )\n\n processed_specs.append((arg_value, arg_spec.tensor, arg_context))\n\n checker.check_shapes(processed_specs)\n\n _check_specs(pre_specs)\n\n with set_shape_checker(checker):\n result = func(*args, **kwargs)\n arg_map[RESULT_TOKEN] = result\n\n _check_specs(post_specs)\n\n return result\n\n set_check_shapes(wrapped, _check_shapes)\n wrapped.__doc__ = parse_and_rewrite_docstring(\n wrapped.__doc__, func_spec, bound_error_context\n )\n return cast(C, wrapped)\n\n return _check_shapes\n", "path": "gpflow/experimental/check_shapes/decorator.py"}, {"content": "# Copyright 2022 The GPflow Contributors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=broad-except\n\n\"\"\"\nExceptions generated by `check_shapes`.\n\nThese rely heavily on the infrastructure in `error_contexts.py`.\n\"\"\"\nfrom .error_contexts import ErrorContext, MessageBuilder\n\n\nclass CheckShapesError(Exception):\n \"\"\"\n Common super class for `check_shapes` errors.\n \"\"\"\n\n error_message: str\n\n def __init__(self, context: ErrorContext) -> None:\n builder = MessageBuilder()\n builder.add_line(\"\")\n builder.add_line(self.error_message)\n with builder.indent() as b:\n context.print(b)\n super().__init__(builder.build())\n\n self.context = context\n\n\nclass VariableTypeError(CheckShapesError):\n \"\"\"\n Error raised if a variable is used both as a rank-1 and a variable-rank variable.\n \"\"\"\n\n error_message = (\n \"Cannot use the same variable to bind both a single dimension\"\n \" and a variable number of dimensions.\"\n )\n\n\nclass SpecificationParseError(CheckShapesError):\n \"\"\"\n Error raised if there was an error parsing the shape specification.\n \"\"\"\n\n error_message = \"Unable to parse shape specification.\"\n\n\nclass DocstringParseError(CheckShapesError):\n \"\"\"\n Error raised if there was an error parsing the shape specification.\n \"\"\"\n\n error_message = \"Unable to parse docstring.\"\n\n\nclass ArgumentReferenceError(CheckShapesError):\n \"\"\"\n Error raised if the argument to check the shape of could not be resolved.\n \"\"\"\n\n error_message = \"Unable to resolve argument / missing argument.\"\n\n\nclass ShapeMismatchError(CheckShapesError):\n \"\"\"\n Error raised if a function is called with tensors of the wrong shape.\n \"\"\"\n\n error_message = \"Tensor shape mismatch.\"\n\n\nclass NoShapeError(CheckShapesError):\n \"\"\"\n Error raised if we are trying to get the shape of an object that does not have a shape.\n \"\"\"\n\n error_message = \"Unable to determine shape of object.\"\n", "path": "gpflow/experimental/check_shapes/exceptions.py"}]}
| 3,172 | 448 |
gh_patches_debug_10089
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1587
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
admin: admins can usurp owners
I believe `admin` and `owner` are meant to be two separate permission levels for the bot. There are `@require_admin` and `@require_owner` for commands, implying that there are certain functions for an `owner` that should not be available to `admin`s.
That being said, if the `admin` plugin is enabled, an `admin` can simply:
<details>
<summary>Not a spoiler :sweat_smile: </summary>
<pre>.set core.owner <i>I-Am-gRoot</i></pre>
</details>
bypassing the <code>@require_owner</code> check and potentially locking the real owner out of their own bot.
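A minimal illustration of the kind of guard that would close this hole (a sketch only; `section_name` and `option` refer to the locals in `set_config` below, and the refusal wording is an assumption):

```python
def is_protected_setting(section_name, option):
    # core.owner determines who passes @require_owner, so chat commands,
    # even admin-only ones, should never be able to rewrite it.
    return section_name == 'core' and option == 'owner'

# Inside set_config, before writing any value:
# if is_protected_setting(section_name, option):
#     bot.say("'core.owner' can only be changed by editing the config file.")
#     return
```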
</issue>
<code>
[start of sopel/modules/admin.py]
1 # coding=utf-8
2 """
3 admin.py - Sopel Admin Module
4 Copyright 2010-2011, Sean B. Palmer (inamidst.com) and Michael Yanovich
5 (yanovich.net)
6 Copyright © 2012, Elad Alfassa, <[email protected]>
7 Copyright 2013, Ari Koivula <[email protected]>
8
9 Licensed under the Eiffel Forum License 2.
10
11 https://sopel.chat
12 """
13 from __future__ import unicode_literals, absolute_import, print_function, division
14
15 from sopel.config.types import (
16 StaticSection, ValidatedAttribute, FilenameAttribute
17 )
18 import sopel.module
19
20
21 class AdminSection(StaticSection):
22 hold_ground = ValidatedAttribute('hold_ground', bool, default=False)
23 """Auto re-join on kick"""
24 auto_accept_invite = ValidatedAttribute('auto_accept_invite', bool,
25 default=True)
26
27
28 def configure(config):
29 """
30 | name | example | purpose |
31 | ---- | ------- | ------- |
32 | hold\\_ground | False | Auto-rejoin the channel after being kicked. |
33 | auto\\_accept\\_invite | True | Auto-join channels when invited. |
34 """
35 config.define_section('admin', AdminSection)
36 config.admin.configure_setting('hold_ground',
37 "Automatically re-join after being kicked?")
38 config.admin.configure_setting('auto_accept_invite',
39 'Automatically join channels when invited?')
40
41
42 def setup(bot):
43 bot.config.define_section('admin', AdminSection)
44
45
46 @sopel.module.require_privmsg
47 @sopel.module.require_admin
48 @sopel.module.commands('join')
49 @sopel.module.priority('low')
50 @sopel.module.example('.join #example or .join #example key')
51 def join(bot, trigger):
52 """Join the specified channel. This is an admin-only command."""
53 channel, key = trigger.group(3), trigger.group(4)
54 if not channel:
55 return
56 elif not key:
57 bot.join(channel)
58 else:
59 bot.join(channel, key)
60
61
62 @sopel.module.require_privmsg
63 @sopel.module.require_admin
64 @sopel.module.commands('part')
65 @sopel.module.priority('low')
66 @sopel.module.example('.part #example')
67 def part(bot, trigger):
68 """Part the specified channel. This is an admin-only command."""
69 channel, _sep, part_msg = trigger.group(2).partition(' ')
70 if part_msg:
71 bot.part(channel, part_msg)
72 else:
73 bot.part(channel)
74
75
76 @sopel.module.require_privmsg
77 @sopel.module.require_owner
78 @sopel.module.commands('quit')
79 @sopel.module.priority('low')
80 def quit(bot, trigger):
81 """Quit from the server. This is an owner-only command."""
82 quit_message = trigger.group(2)
83 if not quit_message:
84 quit_message = 'Quitting on command from %s' % trigger.nick
85
86 bot.quit(quit_message)
87
88
89 @sopel.module.require_privmsg
90 @sopel.module.require_admin
91 @sopel.module.commands('msg')
92 @sopel.module.priority('low')
93 @sopel.module.example('.msg #YourPants Does anyone else smell neurotoxin?')
94 def msg(bot, trigger):
95 """
96 Send a message to a given channel or nick. Can only be done in privmsg by
97 an admin.
98 """
99 if trigger.group(2) is None:
100 return
101
102 channel, _sep, message = trigger.group(2).partition(' ')
103 message = message.strip()
104 if not channel or not message:
105 return
106
107 bot.msg(channel, message)
108
109
110 @sopel.module.require_privmsg
111 @sopel.module.require_admin
112 @sopel.module.commands('me')
113 @sopel.module.priority('low')
114 def me(bot, trigger):
115 """
116 Send an ACTION (/me) to a given channel or nick. Can only be done in
117 privmsg by an admin.
118 """
119 if trigger.group(2) is None:
120 return
121
122 channel, _sep, action = trigger.group(2).partition(' ')
123 action = action.strip()
124 if not channel or not action:
125 return
126
127 msg = '\x01ACTION %s\x01' % action
128 bot.msg(channel, msg)
129
130
131 @sopel.module.event('INVITE')
132 @sopel.module.rule('.*')
133 @sopel.module.priority('low')
134 def invite_join(bot, trigger):
135 """
136 Join a channel Sopel is invited to, if the inviter is an admin.
137 """
138 if trigger.admin or bot.config.admin.auto_accept_invite:
139 bot.join(trigger.args[1])
140 return
141
142
143 @sopel.module.event('KICK')
144 @sopel.module.rule(r'.*')
145 @sopel.module.priority('low')
146 def hold_ground(bot, trigger):
147 """
148 This function monitors all kicks across all channels Sopel is in. If it
149 detects that it is the one kicked it'll automatically join that channel.
150
151 WARNING: This may not be needed and could cause problems if Sopel becomes
152 annoying. Please use this with caution.
153 """
154 if bot.config.admin.hold_ground:
155 channel = trigger.sender
156 if trigger.args[1] == bot.nick:
157 bot.join(channel)
158
159
160 @sopel.module.require_privmsg
161 @sopel.module.require_admin
162 @sopel.module.commands('mode')
163 @sopel.module.priority('low')
164 def mode(bot, trigger):
165 """Set a user mode on Sopel. Can only be done in privmsg by an admin."""
166 mode = trigger.group(3)
167 bot.write(('MODE', bot.nick + ' ' + mode))
168
169
170 @sopel.module.require_privmsg("This command only works as a private message.")
171 @sopel.module.require_admin("This command requires admin privileges.")
172 @sopel.module.commands('set')
173 @sopel.module.example('.set core.owner Me')
174 def set_config(bot, trigger):
175 """See and modify values of Sopel's config object.
176
177 Trigger args:
178 arg1 - section and option, in the form "section.option"
179 arg2 - value
180
181 If there is no section, section will default to "core".
182 If value is None, the option will be deleted.
183 """
184 # Get section and option from first argument.
185 match = trigger.group(3)
186 if match is None:
187 bot.reply("Usage: .set section.option value")
188 return
189 arg1 = match.split('.')
190 if len(arg1) == 1:
191 section_name, option = "core", arg1[0]
192 elif len(arg1) == 2:
193 section_name, option = arg1
194 else:
195 bot.reply("Usage: .set section.option value")
196 return
197 section = getattr(bot.config, section_name)
198 static_sec = isinstance(section, StaticSection)
199
200 if static_sec and not hasattr(section, option):
201 bot.say('[{}] section has no option {}.'.format(section_name, option))
202 return
203
204 delim = trigger.group(2).find(' ')
205 # Skip preceding whitespaces, if any.
206 while delim > 0 and delim < len(trigger.group(2)) and trigger.group(2)[delim] == ' ':
207 delim = delim + 1
208
209 # Display current value if no value is given.
210 if delim == -1 or delim == len(trigger.group(2)):
211 if not static_sec and bot.config.parser.has_option(section, option):
212 bot.reply("Option %s.%s does not exist." % (section_name, option))
213 return
214 # Except if the option looks like a password. Censor those to stop them
215 # from being put on log files.
216 if option.endswith("password") or option.endswith("pass"):
217 value = "(password censored)"
218 else:
219 value = getattr(section, option)
220 bot.reply("%s.%s = %s" % (section_name, option, value))
221 return
222
223 # Otherwise, set the value to one given as argument 2.
224 value = trigger.group(2)[delim:]
225 if static_sec:
226 descriptor = getattr(section.__class__, option)
227 try:
228 if isinstance(descriptor, FilenameAttribute):
229 value = descriptor.parse(bot.config, descriptor, value)
230 else:
231 value = descriptor.parse(value)
232 except ValueError as exc:
233 bot.say("Can't set attribute: " + str(exc))
234 return
235 setattr(section, option, value)
236
237
238 @sopel.module.require_privmsg
239 @sopel.module.require_admin
240 @sopel.module.commands('save')
241 @sopel.module.example('.save')
242 def save_config(bot, trigger):
243 """Save state of Sopel's config object to the configuration file."""
244 bot.config.save()
245
[end of sopel/modules/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/admin.py b/sopel/modules/admin.py
--- a/sopel/modules/admin.py
+++ b/sopel/modules/admin.py
@@ -220,6 +220,13 @@
bot.reply("%s.%s = %s" % (section_name, option, value))
return
+ # 'core.owner' cannot be set by the bot. Any changes to this setting must be
+ # made manually in the config file.
+ if section_name == 'core' and option == 'owner':
+ bot.say('\'core.owner\' cannot be set by the bot. '
+ 'This setting must be changed manually in the configuration file.')
+ return
+
# Otherwise, set the value to one given as argument 2.
value = trigger.group(2)[delim:]
if static_sec:
|
{"golden_diff": "diff --git a/sopel/modules/admin.py b/sopel/modules/admin.py\n--- a/sopel/modules/admin.py\n+++ b/sopel/modules/admin.py\n@@ -220,6 +220,13 @@\n bot.reply(\"%s.%s = %s\" % (section_name, option, value))\n return\n \n+ # 'core.owner' cannot be set by the bot. Any changes to this setting must be\n+ # made manually in the config file.\n+ if section_name == 'core' and option == 'owner':\n+ bot.say('\\'core.owner\\' cannot be set by the bot. '\n+ 'This setting must be changed manually in the configuration file.')\n+ return\n+\n # Otherwise, set the value to one given as argument 2.\n value = trigger.group(2)[delim:]\n if static_sec:\n", "issue": "admin: admins can usurp owners\nI believe `admin` and `owner` are meant to be two separate permission levels for the bot. There are `@require_admin` and `@require_owner` for commands, implying that there are certain functions for an `owner` that should not be available to `admin`s. \r\n\r\nThat being said, if the `admin` plugin is enabled, an `admin` can simply:\r\n<details>\r\n<summary>Not a spoiler :sweat_smile: </summary>\r\n\r\n<pre>.set core.owner <i>I-Am-gRoot</i></pre> \r\n</details>\r\nbypassing the <code>@require_owner</code>, and potentially locking them out of their own bot.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nadmin.py - Sopel Admin Module\nCopyright 2010-2011, Sean B. Palmer (inamidst.com) and Michael Yanovich\n(yanovich.net)\nCopyright \u00a9 2012, Elad Alfassa, <[email protected]>\nCopyright 2013, Ari Koivula <[email protected]>\n\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, FilenameAttribute\n)\nimport sopel.module\n\n\nclass AdminSection(StaticSection):\n hold_ground = ValidatedAttribute('hold_ground', bool, default=False)\n \"\"\"Auto re-join on kick\"\"\"\n auto_accept_invite = ValidatedAttribute('auto_accept_invite', bool,\n default=True)\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | hold\\\\_ground | False | Auto-rejoin the channel after being kicked. |\n | auto\\\\_accept\\\\_invite | True | Auto-join channels when invited. |\n \"\"\"\n config.define_section('admin', AdminSection)\n config.admin.configure_setting('hold_ground',\n \"Automatically re-join after being kicked?\")\n config.admin.configure_setting('auto_accept_invite',\n 'Automatically join channels when invited?')\n\n\ndef setup(bot):\n bot.config.define_section('admin', AdminSection)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('join')\[email protected]('low')\[email protected]('.join #example or .join #example key')\ndef join(bot, trigger):\n \"\"\"Join the specified channel. This is an admin-only command.\"\"\"\n channel, key = trigger.group(3), trigger.group(4)\n if not channel:\n return\n elif not key:\n bot.join(channel)\n else:\n bot.join(channel, key)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('part')\[email protected]('low')\[email protected]('.part #example')\ndef part(bot, trigger):\n \"\"\"Part the specified channel. 
This is an admin-only command.\"\"\"\n channel, _sep, part_msg = trigger.group(2).partition(' ')\n if part_msg:\n bot.part(channel, part_msg)\n else:\n bot.part(channel)\n\n\[email protected]_privmsg\[email protected]_owner\[email protected]('quit')\[email protected]('low')\ndef quit(bot, trigger):\n \"\"\"Quit from the server. This is an owner-only command.\"\"\"\n quit_message = trigger.group(2)\n if not quit_message:\n quit_message = 'Quitting on command from %s' % trigger.nick\n\n bot.quit(quit_message)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('msg')\[email protected]('low')\[email protected]('.msg #YourPants Does anyone else smell neurotoxin?')\ndef msg(bot, trigger):\n \"\"\"\n Send a message to a given channel or nick. Can only be done in privmsg by\n an admin.\n \"\"\"\n if trigger.group(2) is None:\n return\n\n channel, _sep, message = trigger.group(2).partition(' ')\n message = message.strip()\n if not channel or not message:\n return\n\n bot.msg(channel, message)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('me')\[email protected]('low')\ndef me(bot, trigger):\n \"\"\"\n Send an ACTION (/me) to a given channel or nick. Can only be done in\n privmsg by an admin.\n \"\"\"\n if trigger.group(2) is None:\n return\n\n channel, _sep, action = trigger.group(2).partition(' ')\n action = action.strip()\n if not channel or not action:\n return\n\n msg = '\\x01ACTION %s\\x01' % action\n bot.msg(channel, msg)\n\n\[email protected]('INVITE')\[email protected]('.*')\[email protected]('low')\ndef invite_join(bot, trigger):\n \"\"\"\n Join a channel Sopel is invited to, if the inviter is an admin.\n \"\"\"\n if trigger.admin or bot.config.admin.auto_accept_invite:\n bot.join(trigger.args[1])\n return\n\n\[email protected]('KICK')\[email protected](r'.*')\[email protected]('low')\ndef hold_ground(bot, trigger):\n \"\"\"\n This function monitors all kicks across all channels Sopel is in. If it\n detects that it is the one kicked it'll automatically join that channel.\n\n WARNING: This may not be needed and could cause problems if Sopel becomes\n annoying. Please use this with caution.\n \"\"\"\n if bot.config.admin.hold_ground:\n channel = trigger.sender\n if trigger.args[1] == bot.nick:\n bot.join(channel)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('mode')\[email protected]('low')\ndef mode(bot, trigger):\n \"\"\"Set a user mode on Sopel. 
Can only be done in privmsg by an admin.\"\"\"\n mode = trigger.group(3)\n bot.write(('MODE', bot.nick + ' ' + mode))\n\n\[email protected]_privmsg(\"This command only works as a private message.\")\[email protected]_admin(\"This command requires admin privileges.\")\[email protected]('set')\[email protected]('.set core.owner Me')\ndef set_config(bot, trigger):\n \"\"\"See and modify values of Sopel's config object.\n\n Trigger args:\n arg1 - section and option, in the form \"section.option\"\n arg2 - value\n\n If there is no section, section will default to \"core\".\n If value is None, the option will be deleted.\n \"\"\"\n # Get section and option from first argument.\n match = trigger.group(3)\n if match is None:\n bot.reply(\"Usage: .set section.option value\")\n return\n arg1 = match.split('.')\n if len(arg1) == 1:\n section_name, option = \"core\", arg1[0]\n elif len(arg1) == 2:\n section_name, option = arg1\n else:\n bot.reply(\"Usage: .set section.option value\")\n return\n section = getattr(bot.config, section_name)\n static_sec = isinstance(section, StaticSection)\n\n if static_sec and not hasattr(section, option):\n bot.say('[{}] section has no option {}.'.format(section_name, option))\n return\n\n delim = trigger.group(2).find(' ')\n # Skip preceding whitespaces, if any.\n while delim > 0 and delim < len(trigger.group(2)) and trigger.group(2)[delim] == ' ':\n delim = delim + 1\n\n # Display current value if no value is given.\n if delim == -1 or delim == len(trigger.group(2)):\n if not static_sec and bot.config.parser.has_option(section, option):\n bot.reply(\"Option %s.%s does not exist.\" % (section_name, option))\n return\n # Except if the option looks like a password. Censor those to stop them\n # from being put on log files.\n if option.endswith(\"password\") or option.endswith(\"pass\"):\n value = \"(password censored)\"\n else:\n value = getattr(section, option)\n bot.reply(\"%s.%s = %s\" % (section_name, option, value))\n return\n\n # Otherwise, set the value to one given as argument 2.\n value = trigger.group(2)[delim:]\n if static_sec:\n descriptor = getattr(section.__class__, option)\n try:\n if isinstance(descriptor, FilenameAttribute):\n value = descriptor.parse(bot.config, descriptor, value)\n else:\n value = descriptor.parse(value)\n except ValueError as exc:\n bot.say(\"Can't set attribute: \" + str(exc))\n return\n setattr(section, option, value)\n\n\[email protected]_privmsg\[email protected]_admin\[email protected]('save')\[email protected]('.save')\ndef save_config(bot, trigger):\n \"\"\"Save state of Sopel's config object to the configuration file.\"\"\"\n bot.config.save()\n", "path": "sopel/modules/admin.py"}]}
| 3,233 | 188 |
gh_patches_debug_35633
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-583
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix several configuration issues
Fix the pending issues in #563 as pointed out by @mauriciovasquezbernal.
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # FIXME find a better way to avoid all those "Expression has type "Any"" errors
16 # type: ignore
17
18 """
19 Simple configuration manager
20
21 This is a configuration manager for OpenTelemetry. It reads configuration
22 values from environment variables prefixed with
23 ``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.
24 The first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase
25 character.
26
27 For example, these environment variables will be read:
28
29 1. ``OPENTELEMETRY_PYTHON_SOMETHING``
30 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
31 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
32
33 These won't:
34
35 1. ``OPENTELEMETRY_PYTH_SOMETHING``
36 2. ``OPENTELEMETRY_PYTHON_something``
37 3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``
38 4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
39
40 The values stored in the environment variables can be found in an instance of
41 ``opentelemetry.configuration.Configuration``. This class can be instantiated
42 freely because instantiating it returns a singleton.
43
44 For example, if the environment variable
45 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
46 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
47
48 Non defined attributes will always return ``None``. This is intended to make it
49 easier to use the ``Configuration`` object in actual code, because it won't be
50 necessary to check for the attribute to be defined first.
51
52 Environment variables used by OpenTelemetry
53 -------------------------------------------
54
55 1. OPENTELEMETRY_PYTHON_METER_PROVIDER
56 2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
57
58 The value of these environment variables should be the name of the entry point
59 that points to the class that implements either provider. This OpenTelemetry
60 API package provides one entry point for each, which can be found in the
61 setup.py file::
62
63 entry_points={
64 ...
65 "opentelemetry_meter_provider": [
66 "default_meter_provider = "
67 "opentelemetry.metrics:DefaultMeterProvider"
68 ],
69 "opentelemetry_tracer_provider": [
70 "default_tracer_provider = "
71 "opentelemetry.trace:DefaultTracerProvider"
72 ],
73 }
74
75 To use the meter provider above, then the
76 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
77 "default_meter_provider" (this is not actually necessary since the
78 OpenTelemetry API provided providers are the default ones used if no
79 configuration is found in the environment variables).
80 """
81
82 from os import environ
83 from re import fullmatch
84
85
86 class Configuration:
87 _instance = None
88
89 __slots__ = []
90
91 def __new__(cls) -> "Configuration":
92 if Configuration._instance is None:
93
94 for key, value in environ.items():
95
96 match = fullmatch("OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)", key)
97
98 if match is not None:
99
100 key = match.group(1).lower()
101
102 setattr(Configuration, "_{}".format(key), value)
103 setattr(
104 Configuration,
105 key,
106 property(
107 fget=lambda cls, key=key: getattr(
108 cls, "_{}".format(key)
109 )
110 ),
111 )
112
113 Configuration.__slots__.append(key)
114
115 Configuration.__slots__ = tuple(Configuration.__slots__)
116
117 Configuration._instance = object.__new__(cls)
118
119 return cls._instance
120
121 def __getattr__(self, name):
122 return None
123
[end of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -19,27 +19,27 @@
Simple configuration manager
This is a configuration manager for OpenTelemetry. It reads configuration
-values from environment variables prefixed with
-``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.
-The first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase
-character.
+values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
+characters are only alphanumeric characters and unserscores, except for the
+first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
For example, these environment variables will be read:
1. ``OPENTELEMETRY_PYTHON_SOMETHING``
2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
These won't:
1. ``OPENTELEMETRY_PYTH_SOMETHING``
-2. ``OPENTELEMETRY_PYTHON_something``
-3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``
-4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
+2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
+3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
The values stored in the environment variables can be found in an instance of
``opentelemetry.configuration.Configuration``. This class can be instantiated
-freely because instantiating it returns a singleton.
+freely because instantiating it returns always the same object.
For example, if the environment variable
``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
@@ -93,11 +93,13 @@
for key, value in environ.items():
- match = fullmatch("OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)", key)
+ match = fullmatch(
+ r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
+ )
if match is not None:
- key = match.group(1).lower()
+ key = match.group(1)
setattr(Configuration, "_{}".format(key), value)
setattr(
|
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -19,27 +19,27 @@\n Simple configuration manager\n \n This is a configuration manager for OpenTelemetry. It reads configuration\n-values from environment variables prefixed with\n-``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.\n-The first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase\n-character.\n+values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\n+characters are only alphanumeric characters and unserscores, except for the\n+first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n \n For example, these environment variables will be read:\n \n 1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n+4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n \n These won't:\n \n 1. ``OPENTELEMETRY_PYTH_SOMETHING``\n-2. ``OPENTELEMETRY_PYTHON_something``\n-3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``\n-4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n+2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n+3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n \n The values stored in the environment variables can be found in an instance of\n ``opentelemetry.configuration.Configuration``. This class can be instantiated\n-freely because instantiating it returns a singleton.\n+freely because instantiating it returns always the same object.\n \n For example, if the environment variable\n ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n@@ -93,11 +93,13 @@\n \n for key, value in environ.items():\n \n- match = fullmatch(\"OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)\", key)\n+ match = fullmatch(\n+ r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n+ )\n \n if match is not None:\n \n- key = match.group(1).lower()\n+ key = match.group(1)\n \n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n", "issue": "Fix several configuration issues\nFix the pending issues in #563 as pointed out by @mauriciovasquezbernal. \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with\n``OPENTELEMETRY_PYTHON_`` whose characters are only all caps and underscores.\nThe first character after ``OPENTELEMETRY_PYTHON_`` must be an uppercase\ncharacter.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. 
``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_something``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_2_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns a singleton.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n\"default_meter_provider\" (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\"OPENTELEMETRY_PYTHON_([A-Z][A-Z_]*)\", key)\n\n if match is not None:\n\n key = match.group(1).lower()\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}]}
| 1,774 | 638 |
gh_patches_debug_61676
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1710
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lower bound for typing_extensions?
* Faker version: 14.1.0
* OS: OpenSUSE Leap 15.3
For old Python versions (before Python 3.8), *faker* requires the *typing_extensions* with at least version 3.10.0.2. Due to some other dependencies inside my project, I am required to use version 3.7.4.3.
### Steps to reproduce
1. `pip install faker`
### Expected behavior
*faker* works with `typing_extensions==3.7.4.3` as well, to allow better integration with larger systems which are stuck at such older versions for some reasons.
### Actual behavior
*faker* declares `typing_extensions>=3.10.0.2` (#1557), although I could see no issue when using it with `typing_extensions==3.7.4.3`.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from pathlib import Path
4
5 from setuptools import find_packages, setup
6
7 here = Path(__file__).resolve().parent
8 README = (here / "README.rst").read_text(encoding="utf-8")
9 VERSION = (here / "VERSION").read_text(encoding="utf-8").strip()
10
11 excluded_packages = ["docs", "tests", "tests.*"]
12
13
14 # this module can be zip-safe if the zipimporter implements iter_modules or if
15 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
16 try:
17 import pkgutil
18 import zipimport
19
20 zip_safe = (
21 hasattr(zipimport.zipimporter, "iter_modules")
22 or zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
23 )
24 except AttributeError:
25 zip_safe = False
26
27 setup(
28 name="Faker",
29 version=VERSION,
30 description="Faker is a Python package that generates fake data for you.",
31 long_description=README,
32 entry_points={
33 "console_scripts": ["faker=faker.cli:execute_from_command_line"],
34 "pytest11": ["faker = faker.contrib.pytest.plugin"],
35 },
36 classifiers=[
37 # See https://pypi.org/pypi?%3Aaction=list_classifiers
38 "Development Status :: 5 - Production/Stable",
39 "Environment :: Console",
40 "Intended Audience :: Developers",
41 "Programming Language :: Python",
42 "Programming Language :: Python :: 3",
43 "Programming Language :: Python :: 3 :: Only",
44 "Programming Language :: Python :: 3.6",
45 "Programming Language :: Python :: 3.7",
46 "Programming Language :: Python :: 3.8",
47 "Programming Language :: Python :: 3.9",
48 "Programming Language :: Python :: Implementation :: CPython",
49 "Programming Language :: Python :: Implementation :: PyPy",
50 "Topic :: Software Development :: Libraries :: Python Modules",
51 "Topic :: Software Development :: Testing",
52 "Topic :: Utilities",
53 "License :: OSI Approved :: MIT License",
54 ],
55 keywords="faker fixtures data test mock generator",
56 author="joke2k",
57 author_email="[email protected]",
58 url="https://github.com/joke2k/faker",
59 project_urls={
60 "Bug Tracker": "https://github.com/joke2k/faker/issues",
61 "Changes": "https://github.com/joke2k/faker/blob/master/CHANGELOG.md",
62 "Documentation": "http://faker.rtfd.org/",
63 "Source Code": "https://github.com/joke2k/faker",
64 },
65 license="MIT License",
66 packages=find_packages(exclude=excluded_packages),
67 package_data={
68 "faker": ["py.typed"],
69 },
70 platforms=["any"],
71 zip_safe=zip_safe,
72 python_requires=">=3.6",
73 install_requires=[
74 "python-dateutil>=2.4",
75 "typing-extensions>=3.10.0.2;python_version<'3.8'",
76 ],
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,6 +72,6 @@
python_requires=">=3.6",
install_requires=[
"python-dateutil>=2.4",
- "typing-extensions>=3.10.0.2;python_version<'3.8'",
+ "typing-extensions>=3.7.4.3;python_version<'3.8'",
],
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,6 +72,6 @@\n python_requires=\">=3.6\",\n install_requires=[\n \"python-dateutil>=2.4\",\n- \"typing-extensions>=3.10.0.2;python_version<'3.8'\",\n+ \"typing-extensions>=3.7.4.3;python_version<'3.8'\",\n ],\n )\n", "issue": "Lower bound for typing_extensions?\n* Faker version: 14.1.0\r\n* OS: OpenSUSE Leap 15.3\r\n\r\nFor old Python versions (before Python 3.8), *faker* requires the *typing_extensions* with at least version 3.10.0.2. Due to some other dependencies inside my project, I am required to use version 3.7.4.3.\r\n\r\n### Steps to reproduce\r\n\r\n1. `pip install faker`\r\n\r\n### Expected behavior\r\n\r\n*faker* works with `typing_extensions==3.7.4.3` as well, to allow better integration with larger systems which are stuck at such older versions for some reasons.\r\n\r\n### Actual behavior\r\n\r\n*faker* declares `typing_extensions>=3.10.0.2` (#1557), although I could see no issue when using it with `typing_extensions==3.7.4.3`.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nhere = Path(__file__).resolve().parent\nREADME = (here / \"README.rst\").read_text(encoding=\"utf-8\")\nVERSION = (here / \"VERSION\").read_text(encoding=\"utf-8\").strip()\n\nexcluded_packages = [\"docs\", \"tests\", \"tests.*\"]\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n\n zip_safe = (\n hasattr(zipimport.zipimporter, \"iter_modules\")\n or zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\n )\nexcept AttributeError:\n zip_safe = False\n\nsetup(\n name=\"Faker\",\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n \"console_scripts\": [\"faker=faker.cli:execute_from_command_line\"],\n \"pytest11\": [\"faker = faker.contrib.pytest.plugin\"],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: MIT License\",\n ],\n keywords=\"faker fixtures data test mock generator\",\n author=\"joke2k\",\n author_email=\"[email protected]\",\n url=\"https://github.com/joke2k/faker\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/joke2k/faker/issues\",\n \"Changes\": \"https://github.com/joke2k/faker/blob/master/CHANGELOG.md\",\n \"Documentation\": \"http://faker.rtfd.org/\",\n \"Source Code\": \"https://github.com/joke2k/faker\",\n },\n license=\"MIT License\",\n packages=find_packages(exclude=excluded_packages),\n package_data={\n \"faker\": [\"py.typed\"],\n },\n platforms=[\"any\"],\n zip_safe=zip_safe,\n 
python_requires=\">=3.6\",\n install_requires=[\n \"python-dateutil>=2.4\",\n \"typing-extensions>=3.10.0.2;python_version<'3.8'\",\n ],\n)\n", "path": "setup.py"}]}
| 1,519 | 103 |
gh_patches_debug_14376
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-agent-495
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
apache check doesn't support basic auth
If I want basic auth, I'd assume that all I have to do is make my config look like:
```
apache_status_url: http://user:[email protected]:80/server-status?auto
```
But actually that just doesn't work. We need to do some special incantations to make `urllib2` do basic auth.
</issue>
<code>
[start of checks.d/apache.py]
1 import urllib2
2
3 from util import headers
4 from checks import AgentCheck
5
6 class Apache(AgentCheck):
7 """Tracks basic connection/requests/workers metrics
8
9 See http://httpd.apache.org/docs/2.2/mod/mod_status.html for more details
10 """
11 GAUGES = {
12 'IdleWorkers': 'apache.performance.idle_workers',
13 'BusyWorkers': 'apache.performance.busy_workers',
14 'CPULoad': 'apache.performance.cpu_load',
15 'Uptime': 'apache.performance.uptime',
16 'Total kBytes': 'apache.net.bytes',
17 'Total Accesses': 'apache.net.hits',
18 }
19
20 RATES = {
21 'Total kBytes': 'apache.net.bytes_per_s',
22 'Total Accesses': 'apache.net.request_per_s'
23 }
24
25 def __init__(self, name, init_config, agentConfig, instances=None):
26 AgentCheck.__init__(self, name, init_config, agentConfig, instances)
27 self.assumed_url = {}
28
29 def check(self, instance):
30 if 'apache_status_url' not in instance:
31 raise Exception("Missing 'apache_status_url' in Apache config")
32
33 url = self.assumed_url.get(instance['apache_status_url'], instance['apache_status_url'])
34
35 tags = instance.get('tags', [])
36 req = urllib2.Request(url, None,
37 headers(self.agentConfig))
38 request = urllib2.urlopen(req)
39 response = request.read()
40
41 metric_count = 0
42 # Loop through and extract the numerical values
43 for line in response.split('\n'):
44 values = line.split(': ')
45 if len(values) == 2: # match
46 metric, value = values
47 try:
48 value = float(value)
49 except ValueError:
50 continue
51
52 # Special case: kBytes => bytes
53 if metric == 'Total kBytes':
54 value = value * 1024
55
56 # Send metric as a gauge, if applicable
57 if metric in self.GAUGES:
58 metric_count += 1
59 metric_name = self.GAUGES[metric]
60 self.gauge(metric_name, value, tags=tags)
61
62 # Send metric as a rate, if applicable
63 if metric in self.RATES:
64 metric_count += 1
65 metric_name = self.RATES[metric]
66 self.rate(metric_name, value, tags=tags)
67
68 if metric_count == 0:
69 if self.assumed_url.get(instance['apache_status_url'], None) is None and url[-5:] != '?auto':
70 self.assumed_url[instance['apache_status_url']]= '%s?auto' % url
71 self.warning("Assuming url was not correct. Trying to add ?auto suffix to the url")
72 self.check(instance)
73 else:
74 raise Exception("No metrics were fetched for this instance. Make sure that %s is the proper url." % instance['apache_status_url'])
75
76
77 @staticmethod
78 def parse_agent_config(agentConfig):
79 if not agentConfig.get('apache_status_url'):
80 return False
81
82 return {
83 'instances': [{'apache_status_url': agentConfig.get('apache_status_url')}]
84 }
85
[end of checks.d/apache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checks.d/apache.py b/checks.d/apache.py
--- a/checks.d/apache.py
+++ b/checks.d/apache.py
@@ -1,4 +1,5 @@
import urllib2
+import base64
from util import headers
from checks import AgentCheck
@@ -35,6 +36,10 @@
tags = instance.get('tags', [])
req = urllib2.Request(url, None,
headers(self.agentConfig))
+ if 'apache_user' in instance and 'apache_password' in instance:
+ auth_str = '%s:%s' % (instance['apache_user'], instance['apache_password'])
+ encoded_auth_str = base64.encodestring(auth_str)
+ req.add_header("Authorization", "Basic %s" % encoded_auth_str)
request = urllib2.urlopen(req)
response = request.read()
|
{"golden_diff": "diff --git a/checks.d/apache.py b/checks.d/apache.py\n--- a/checks.d/apache.py\n+++ b/checks.d/apache.py\n@@ -1,4 +1,5 @@\n import urllib2\n+import base64\n \n from util import headers\n from checks import AgentCheck\n@@ -35,6 +36,10 @@\n tags = instance.get('tags', [])\n req = urllib2.Request(url, None,\n headers(self.agentConfig))\n+ if 'apache_user' in instance and 'apache_password' in instance:\n+ auth_str = '%s:%s' % (instance['apache_user'], instance['apache_password'])\n+ encoded_auth_str = base64.encodestring(auth_str)\n+ req.add_header(\"Authorization\", \"Basic %s\" % encoded_auth_str)\n request = urllib2.urlopen(req)\n response = request.read()\n", "issue": "apache check doesn't support basic auth\nIf I want basic auth, I'd assume that all I have to do is make my config look like:\n\n```\napache_status_url: http://user:[email protected]:80/server-status?auto\n```\n\nBut actually that just doesn't work. We need to do some special incantations to make `urllib2` do basic auth.\n\n", "before_files": [{"content": "import urllib2\n\nfrom util import headers\nfrom checks import AgentCheck\n\nclass Apache(AgentCheck):\n \"\"\"Tracks basic connection/requests/workers metrics\n\n See http://httpd.apache.org/docs/2.2/mod/mod_status.html for more details\n \"\"\"\n GAUGES = {\n 'IdleWorkers': 'apache.performance.idle_workers',\n 'BusyWorkers': 'apache.performance.busy_workers',\n 'CPULoad': 'apache.performance.cpu_load',\n 'Uptime': 'apache.performance.uptime',\n 'Total kBytes': 'apache.net.bytes',\n 'Total Accesses': 'apache.net.hits',\n }\n\n RATES = {\n 'Total kBytes': 'apache.net.bytes_per_s',\n 'Total Accesses': 'apache.net.request_per_s'\n }\n\n def __init__(self, name, init_config, agentConfig, instances=None):\n AgentCheck.__init__(self, name, init_config, agentConfig, instances)\n self.assumed_url = {}\n\n def check(self, instance):\n if 'apache_status_url' not in instance:\n raise Exception(\"Missing 'apache_status_url' in Apache config\")\n\n url = self.assumed_url.get(instance['apache_status_url'], instance['apache_status_url'])\n\n tags = instance.get('tags', [])\n req = urllib2.Request(url, None,\n headers(self.agentConfig))\n request = urllib2.urlopen(req)\n response = request.read()\n\n metric_count = 0\n # Loop through and extract the numerical values\n for line in response.split('\\n'):\n values = line.split(': ')\n if len(values) == 2: # match\n metric, value = values\n try:\n value = float(value)\n except ValueError:\n continue\n\n # Special case: kBytes => bytes\n if metric == 'Total kBytes':\n value = value * 1024\n\n # Send metric as a gauge, if applicable\n if metric in self.GAUGES:\n metric_count += 1\n metric_name = self.GAUGES[metric]\n self.gauge(metric_name, value, tags=tags)\n\n # Send metric as a rate, if applicable\n if metric in self.RATES:\n metric_count += 1\n metric_name = self.RATES[metric]\n self.rate(metric_name, value, tags=tags)\n\n if metric_count == 0:\n if self.assumed_url.get(instance['apache_status_url'], None) is None and url[-5:] != '?auto':\n self.assumed_url[instance['apache_status_url']]= '%s?auto' % url\n self.warning(\"Assuming url was not correct. Trying to add ?auto suffix to the url\")\n self.check(instance)\n else:\n raise Exception(\"No metrics were fetched for this instance. 
Make sure that %s is the proper url.\" % instance['apache_status_url'])\n\n\n @staticmethod\n def parse_agent_config(agentConfig):\n if not agentConfig.get('apache_status_url'):\n return False\n\n return {\n 'instances': [{'apache_status_url': agentConfig.get('apache_status_url')}]\n }\n", "path": "checks.d/apache.py"}]}
| 1,464 | 187 |
gh_patches_debug_4502
|
rasdani/github-patches
|
git_diff
|
deis__deis-1495
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CoreOS on Google Compute Engine
Could you add Compute Engine to contrib?
http://googlecloudplatform.blogspot.cz/2014/05/official-coreos-images-are-now-available-on-google-compute-engine.html?m=1
</issue>
<code>
[start of client/setup.py]
1 #!/usr/bin/env python
2
3 """Install the Deis command-line client."""
4
5
6 try:
7 from setuptools import setup
8 USE_SETUPTOOLS = True
9 except ImportError:
10 from distutils.core import setup
11 USE_SETUPTOOLS = False
12
13 try:
14 LONG_DESCRIPTION = open('README.rst').read()
15 except IOError:
16 LONG_DESCRIPTION = 'Deis command-line client'
17
18 try:
19 APACHE_LICENSE = open('LICENSE').read()
20 except IOError:
21 APACHE_LICENSE = 'See http://www.apache.org/licenses/LICENSE-2.0'
22
23 KWARGS = {}
24 if USE_SETUPTOOLS:
25 KWARGS = {'entry_points': {'console_scripts': ['deis = deis:main']}}
26 else:
27 KWARGS = {'scripts': ['deis']}
28
29
30 setup(name='deis',
31 version='0.11.0',
32 license=APACHE_LICENSE,
33 description='Command-line Client for Deis, the open PaaS',
34 author='OpDemand',
35 author_email='[email protected]',
36 url='https://github.com/deis/deis',
37 keywords=[
38 'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',
39 'aws', 'ec2', 'rackspace', 'digitalocean'
40 ],
41 classifiers=[
42 'Development Status :: 4 - Beta',
43 'Environment :: Console',
44 'Intended Audience :: Developers',
45 'Intended Audience :: Information Technology',
46 'Intended Audience :: System Administrators',
47 'License :: OSI Approved :: Apache Software License',
48 'Operating System :: OS Independent',
49 'Programming Language :: Python',
50 'Programming Language :: Python :: 2.7',
51 'Topic :: Internet',
52 'Topic :: System :: Systems Administration',
53 ],
54 py_modules=['deis'],
55 data_files=[
56 ('.', ['README.rst']),
57 ],
58 long_description=LONG_DESCRIPTION,
59 install_requires=[
60 'docopt==0.6.1', 'python-dateutil==2.2',
61 'PyYAML==3.10', 'requests==2.2.1', 'urllib3==1.8.2'
62 ],
63 zip_safe=True,
64 **KWARGS)
65
[end of client/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/client/setup.py b/client/setup.py
--- a/client/setup.py
+++ b/client/setup.py
@@ -36,7 +36,7 @@
url='https://github.com/deis/deis',
keywords=[
'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',
- 'aws', 'ec2', 'rackspace', 'digitalocean'
+ 'aws', 'ec2', 'rackspace', 'digitalocean', 'gce'
],
classifiers=[
'Development Status :: 4 - Beta',
|
{"golden_diff": "diff --git a/client/setup.py b/client/setup.py\n--- a/client/setup.py\n+++ b/client/setup.py\n@@ -36,7 +36,7 @@\n url='https://github.com/deis/deis',\n keywords=[\n 'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',\n- 'aws', 'ec2', 'rackspace', 'digitalocean'\n+ 'aws', 'ec2', 'rackspace', 'digitalocean', 'gce'\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n", "issue": "CoreOS on Google Compute Engine\nCould you add Compute Engine to contrib?\n\nhttp://googlecloudplatform.blogspot.cz/2014/05/official-coreos-images-are-now-available-on-google-compute-engine.html?m=1\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"Install the Deis command-line client.\"\"\"\n\n\ntry:\n from setuptools import setup\n USE_SETUPTOOLS = True\nexcept ImportError:\n from distutils.core import setup\n USE_SETUPTOOLS = False\n\ntry:\n LONG_DESCRIPTION = open('README.rst').read()\nexcept IOError:\n LONG_DESCRIPTION = 'Deis command-line client'\n\ntry:\n APACHE_LICENSE = open('LICENSE').read()\nexcept IOError:\n APACHE_LICENSE = 'See http://www.apache.org/licenses/LICENSE-2.0'\n\nKWARGS = {}\nif USE_SETUPTOOLS:\n KWARGS = {'entry_points': {'console_scripts': ['deis = deis:main']}}\nelse:\n KWARGS = {'scripts': ['deis']}\n\n\nsetup(name='deis',\n version='0.11.0',\n license=APACHE_LICENSE,\n description='Command-line Client for Deis, the open PaaS',\n author='OpDemand',\n author_email='[email protected]',\n url='https://github.com/deis/deis',\n keywords=[\n 'opdemand', 'deis', 'paas', 'cloud', 'chef', 'docker', 'heroku',\n 'aws', 'ec2', 'rackspace', 'digitalocean'\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet',\n 'Topic :: System :: Systems Administration',\n ],\n py_modules=['deis'],\n data_files=[\n ('.', ['README.rst']),\n ],\n long_description=LONG_DESCRIPTION,\n install_requires=[\n 'docopt==0.6.1', 'python-dateutil==2.2',\n 'PyYAML==3.10', 'requests==2.2.1', 'urllib3==1.8.2'\n ],\n zip_safe=True,\n **KWARGS)\n", "path": "client/setup.py"}]}
| 1,186 | 134 |
gh_patches_debug_5489
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-2005
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request: RVA-based breakpoints
**Is your feature request related to a problem? Please describe.**
I am missing a command that would allow creating a breakpoint relative to some image - either main program or a library - for example at offset 0x1337 from libc base.
**Describe the solution you'd like**
A new command that would enable this, for example
````
bp libc.so+0x1337
````
**Describe alternatives you've considered**
The alternative is to manually find the image bases which is tedious.
</issue>
<code>
[start of pwndbg/gdblib/functions.py]
1 """
2 Put all functions defined for gdb in here.
3
4 This file might be changed into a module in the future.
5 """
6
7 from __future__ import annotations
8
9 import functools
10 from typing import Any
11 from typing import Callable
12 from typing import List
13
14 import gdb
15
16 import pwndbg.gdblib.elf
17 import pwndbg.gdblib.proc
18
19 functions: List[_GdbFunction] = []
20
21
22 def GdbFunction(only_when_running: bool = False) -> Callable[..., Any]:
23 return functools.partial(_GdbFunction, only_when_running=only_when_running)
24
25
26 class _GdbFunction(gdb.Function):
27 def __init__(self, func: Callable[..., Any], only_when_running: bool) -> None:
28 self.name = func.__name__
29 self.func = func
30 self.only_when_running = only_when_running
31
32 functions.append(self)
33
34 super().__init__(self.name)
35
36 functools.update_wrapper(self, func)
37 self.__doc__ = func.__doc__
38
39 def invoke(self, *args: gdb.Value) -> Any:
40 if self.only_when_running and not pwndbg.gdblib.proc.alive:
41 # Returning empty string is a workaround that we can't stop e.g. `break *$rebase(offset)`
42 # Thx to that, gdb will print out 'evaluation of this expression requires the target program to be active'
43 return ""
44
45 return self.func(*args)
46
47 def __call__(self, *args: gdb.Value) -> Any:
48 return self.invoke(*args)
49
50
51 @GdbFunction(only_when_running=True)
52 def rebase(addr: gdb.Value | int) -> int:
53 """Return rebased address."""
54 base = pwndbg.gdblib.elf.exe().address
55 return base + int(addr)
56
[end of pwndbg/gdblib/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/gdblib/functions.py b/pwndbg/gdblib/functions.py
--- a/pwndbg/gdblib/functions.py
+++ b/pwndbg/gdblib/functions.py
@@ -53,3 +53,16 @@
"""Return rebased address."""
base = pwndbg.gdblib.elf.exe().address
return base + int(addr)
+
+
+@GdbFunction(only_when_running=True)
+def base(name_pattern: gdb.Value | str) -> int:
+ if isinstance(name_pattern, gdb.Value):
+ name = name_pattern.string()
+ else:
+ name = name_pattern
+
+ for p in pwndbg.gdblib.vmmap.get():
+ if name in p.objfile:
+ return p.vaddr
+ raise ValueError(f"No mapping named {name}")
|
{"golden_diff": "diff --git a/pwndbg/gdblib/functions.py b/pwndbg/gdblib/functions.py\n--- a/pwndbg/gdblib/functions.py\n+++ b/pwndbg/gdblib/functions.py\n@@ -53,3 +53,16 @@\n \"\"\"Return rebased address.\"\"\"\n base = pwndbg.gdblib.elf.exe().address\n return base + int(addr)\n+\n+\n+@GdbFunction(only_when_running=True)\n+def base(name_pattern: gdb.Value | str) -> int:\n+ if isinstance(name_pattern, gdb.Value):\n+ name = name_pattern.string()\n+ else:\n+ name = name_pattern\n+\n+ for p in pwndbg.gdblib.vmmap.get():\n+ if name in p.objfile:\n+ return p.vaddr\n+ raise ValueError(f\"No mapping named {name}\")\n", "issue": "Feature request: RVA-based breakpoints\n**Is your feature request related to a problem? Please describe.**\r\nI am missing a command that would allow creating a breakpoint relative to some image - either main program or a library - for example at offset 0x1337 from libc base.\r\n\r\n**Describe the solution you'd like**\r\nA new command that would enable this, for example\r\n\r\n````\r\nbp libc.so+0x1337\r\n````\r\n\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative is to manually find the image bases which is tedious.\r\n\n", "before_files": [{"content": "\"\"\"\nPut all functions defined for gdb in here.\n\nThis file might be changed into a module in the future.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport functools\nfrom typing import Any\nfrom typing import Callable\nfrom typing import List\n\nimport gdb\n\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.proc\n\nfunctions: List[_GdbFunction] = []\n\n\ndef GdbFunction(only_when_running: bool = False) -> Callable[..., Any]:\n return functools.partial(_GdbFunction, only_when_running=only_when_running)\n\n\nclass _GdbFunction(gdb.Function):\n def __init__(self, func: Callable[..., Any], only_when_running: bool) -> None:\n self.name = func.__name__\n self.func = func\n self.only_when_running = only_when_running\n\n functions.append(self)\n\n super().__init__(self.name)\n\n functools.update_wrapper(self, func)\n self.__doc__ = func.__doc__\n\n def invoke(self, *args: gdb.Value) -> Any:\n if self.only_when_running and not pwndbg.gdblib.proc.alive:\n # Returning empty string is a workaround that we can't stop e.g. `break *$rebase(offset)`\n # Thx to that, gdb will print out 'evaluation of this expression requires the target program to be active'\n return \"\"\n\n return self.func(*args)\n\n def __call__(self, *args: gdb.Value) -> Any:\n return self.invoke(*args)\n\n\n@GdbFunction(only_when_running=True)\ndef rebase(addr: gdb.Value | int) -> int:\n \"\"\"Return rebased address.\"\"\"\n base = pwndbg.gdblib.elf.exe().address\n return base + int(addr)\n", "path": "pwndbg/gdblib/functions.py"}]}
| 1,145 | 185 |
gh_patches_debug_20737
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-10723
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Say all on Desktop raises an error
### Steps to reproduce:
1. Focus the desktop.
2. Invoke caret say all
### Actual behavior:
The following error is raised:
```
ERROR - scriptHandler.executeScript (15:54:57.769):
error executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'
Traceback (most recent call last):
File "scriptHandler.pyc", line 190, in executeScript
File "globalCommands.pyc", line 1334, in script_sayAll
File "sayAllHandler.pyc", line 79, in readText
File "sayAllHandler.pyc", line 119, in nextLine
AttributeError: '_TextReader' object has no attribute 'reader'
ERROR - stderr (15:54:57.779):
Exception ignored in:
ERROR - stderr (15:54:57.790):
<function _TextReader.__del__ at 0x0462F390>
ERROR - stderr (15:54:57.803):
Traceback (most recent call last):
ERROR - stderr (15:54:57.815):
File "sayAllHandler.pyc", line 213, in __del__
ERROR - stderr (15:54:57.827):
File "sayAllHandler.pyc", line 206, in stop
ERROR - stderr (15:54:57.839):
AttributeError
ERROR - stderr (15:54:57.851):
:
ERROR - stderr (15:54:57.863):
'_TextReader' object has no attribute 'reader'
```
### Expected behavior:
NO error
### System configuration
#### NVDA installed/portable/running from source:
Installed
#### NVDA version:
threshold-18069
#### Windows version:
Windows 10 1903 build 18362.239
</issue>
<code>
[start of source/sayAllHandler.py]
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2017 NV Access Limited
3 # This file may be used under the terms of the GNU General Public License, version 2 or later.
4 # For more details see: https://www.gnu.org/licenses/gpl-2.0.html
5
6 import weakref
7 import speech
8 import synthDriverHandler
9 from logHandler import log
10 import config
11 import controlTypes
12 import api
13 import textInfos
14 import queueHandler
15 import winKernel
16
17 CURSOR_CARET = 0
18 CURSOR_REVIEW = 1
19
20 lastSayAllMode = None
21 #: The active say all manager.
22 #: This is a weakref because the manager should be allowed to die once say all is complete.
23 _activeSayAll = lambda: None # Return None when called like a dead weakref.
24
25 def stop():
26 active = _activeSayAll()
27 if active:
28 active.stop()
29
30 def isRunning():
31 """Determine whether say all is currently running.
32 @return: C{True} if say all is currently running, C{False} if not.
33 @rtype: bool
34 """
35 return bool(_activeSayAll())
36
37 def readObjects(obj):
38 global _activeSayAll
39 reader = _ObjectsReader(obj)
40 _activeSayAll = weakref.ref(reader)
41 reader.next()
42
43 class _ObjectsReader(object):
44
45 def __init__(self, root):
46 self.walker = self.walk(root)
47 self.prevObj = None
48
49 def walk(self, obj):
50 yield obj
51 child=obj.simpleFirstChild
52 while child:
53 for descendant in self.walk(child):
54 yield descendant
55 child=child.simpleNext
56
57 def next(self):
58 if not self.walker:
59 # We were stopped.
60 return
61 if self.prevObj:
62 # We just started speaking this object, so move the navigator to it.
63 api.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)
64 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
65 # Move onto the next object.
66 self.prevObj = obj = next(self.walker, None)
67 if not obj:
68 return
69 # Call this method again when we start speaking this object.
70 callbackCommand = speech.CallbackCommand(self.next, name="say-all:next")
71 speech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)
72
73 def stop(self):
74 self.walker = None
75
76 def readText(cursor):
77 global lastSayAllMode, _activeSayAll
78 lastSayAllMode=cursor
79 reader = _TextReader(cursor)
80 _activeSayAll = weakref.ref(reader)
81 reader.nextLine()
82
83 class _TextReader(object):
84 """Manages continuous reading of text.
85 This is intended for internal use only.
86
87 The high level flow of control is as follows:
88 1. The constructor sets things up.
89 2. L{nextLine} is called to read the first line.
90 3. When it speaks a line, L{nextLine} request that L{lineReached} be called
91 when we start speaking this line, providing the position and state at this point.
92 4. When we start speaking a line, L{lineReached} is called
93 and moves the cursor to that line.
94 5. L{lineReached} calls L{nextLine}.
95 6. If there are more lines, L{nextLine} works as per steps 3 and 4.
96 7. Otherwise, if the object doesn't support page turns, we're finished.
97 8. If the object does support page turns,
98 we request that L{turnPage} be called when speech is finished.
99 9. L{turnPage} tries to turn the page.
100 10. If there are no more pages, we're finished.
101 11. If there is another page, L{turnPage} calls L{nextLine}.
102 """
103 MAX_BUFFERED_LINES = 10
104
105 def __init__(self, cursor):
106 self.cursor = cursor
107 self.trigger = SayAllProfileTrigger()
108 self.trigger.enter()
109 # Start at the cursor.
110 if cursor == CURSOR_CARET:
111 try:
112 self.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
113 except (NotImplementedError, RuntimeError):
114 return
115 else:
116 self.reader = api.getReviewPosition()
117 self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
118 self.numBufferedLines = 0
119
120 def nextLine(self):
121 if not self.reader:
122 log.debug("no self.reader")
123 # We were stopped.
124 return
125 if not self.reader.obj:
126 log.debug("no self.reader.obj")
127 # The object died, so we should too.
128 self.finish()
129 return
130 bookmark = self.reader.bookmark
131 # Expand to the current line.
132 # We use move end rather than expand
133 # because the user might start in the middle of a line
134 # and we don't want to read from the start of the line in that case.
135 # For lines after the first, it's also more efficient because
136 # we're already at the start of the line, so there's no need to search backwards.
137 delta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint="end")
138 if delta <= 0:
139 # No more text.
140 if isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):
141 # Once the last line finishes reading, try turning the page.
142 cb = speech.CallbackCommand(self.turnPage, name="say-all:turnPage")
143 speech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])
144 else:
145 self.finish()
146 return
147
148 # Copy the speakTextInfoState so that speak callbackCommand
149 # and its associated callback are using a copy isolated to this specific line.
150 state = self.speakTextInfoState.copy()
151 # Call lineReached when we start speaking this line.
152 # lineReached will move the cursor and trigger reading of the next line.
153
154 def _onLineReached(obj=self.reader.obj, state=state):
155 self.lineReached(obj, bookmark, state)
156
157 cb = speech.CallbackCommand(
158 _onLineReached,
159 name="say-all:lineReached"
160 )
161
162 # Generate the speech sequence for the reader textInfo
163 # and insert the lineReached callback at the very beginning of the sequence.
164 # _linePrefix on speakTextInfo cannot be used here
165 # As it would be inserted in the sequence after all initial control starts which is too late.
166 speechGen = speech.getTextInfoSpeech(
167 self.reader,
168 unit=textInfos.UNIT_READINGCHUNK,
169 reason=controlTypes.REASON_SAYALL,
170 useCache=state
171 )
172 seq = list(speech._flattenNestedSequences(speechGen))
173 seq.insert(0, cb)
174 # Speak the speech sequence.
175 spoke = speech.speakWithoutPauses(seq)
176 # Update the textInfo state ready for when speaking the next line.
177 self.speakTextInfoState = state.copy()
178
179 # Collapse to the end of this line, ready to read the next.
180 try:
181 self.reader.collapse(end=True)
182 except RuntimeError:
183 # This occurs in Microsoft Word when the range covers the end of the document.
184 # without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.
185 self.finish()
186 return
187 if not spoke:
188 # This line didn't include a natural pause, so nothing was spoken.
189 self.numBufferedLines += 1
190 if self.numBufferedLines < self.MAX_BUFFERED_LINES:
191 # Move on to the next line.
192 # We queue this to allow the user a chance to stop say all.
193 queueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)
194 else:
195 # We don't want to buffer too much.
196 # Force speech. lineReached will resume things when speech catches up.
197 speech.speakWithoutPauses(None)
198 # The first buffered line has now started speaking.
199 self.numBufferedLines -= 1
200
201 def lineReached(self, obj, bookmark, state):
202 # We've just started speaking this line, so move the cursor there.
203 state.updateObj()
204 updater = obj.makeTextInfo(bookmark)
205 if self.cursor == CURSOR_CARET:
206 updater.updateCaret()
207 if self.cursor != CURSOR_CARET or config.conf["reviewCursor"]["followCaret"]:
208 api.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)
209 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
210 if self.numBufferedLines == 0:
211 # This was the last line spoken, so move on.
212 self.nextLine()
213 else:
214 self.numBufferedLines -= 1
215
216 def turnPage(self):
217 try:
218 self.reader.obj.turnPage()
219 except RuntimeError:
220 log.debug("No more pages")
221 # No more pages.
222 self.stop()
223 return
224 self.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)
225 self.nextLine()
226
227 def finish(self):
228 # There is no more text.
229 # Call stop to clean up, but only after speech completely finishes.
230 # Otherwise, if a different synth is being used for say all,
231 # we might switch synths too early and truncate the final speech.
232 # We do this by putting a CallbackCommand at the start of a new utterance.
233 cb = speech.CallbackCommand(self.stop, name="say-all:stop")
234 speech.speakWithoutPauses([
235 speech.EndUtteranceCommand(),
236 cb,
237 speech.EndUtteranceCommand()
238 ])
239
240 def stop(self):
241 if not self.reader:
242 return
243 self.reader = None
244 self.trigger.exit()
245 self.trigger = None
246
247 def __del__(self):
248 self.stop()
249
250 class SayAllProfileTrigger(config.ProfileTrigger):
251 """A configuration profile trigger for when say all is in progress.
252 """
253 spec = "sayAll"
254
[end of source/sayAllHandler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py
--- a/source/sayAllHandler.py
+++ b/source/sayAllHandler.py
@@ -76,7 +76,11 @@
def readText(cursor):
global lastSayAllMode, _activeSayAll
lastSayAllMode=cursor
- reader = _TextReader(cursor)
+ try:
+ reader = _TextReader(cursor)
+ except NotImplementedError:
+ log.debugWarning("Unable to make reader", exc_info=True)
+ return
_activeSayAll = weakref.ref(reader)
reader.nextLine()
@@ -110,8 +114,8 @@
if cursor == CURSOR_CARET:
try:
self.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
- except (NotImplementedError, RuntimeError):
- return
+ except (NotImplementedError, RuntimeError) as e:
+ raise NotImplementedError("Unable to make TextInfo: " + str(e))
else:
self.reader = api.getReviewPosition()
self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
|
{"golden_diff": "diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py\n--- a/source/sayAllHandler.py\n+++ b/source/sayAllHandler.py\n@@ -76,7 +76,11 @@\n def readText(cursor):\r\n \tglobal lastSayAllMode, _activeSayAll\r\n \tlastSayAllMode=cursor\r\n-\treader = _TextReader(cursor)\r\n+\ttry:\r\n+\t\treader = _TextReader(cursor)\r\n+\texcept NotImplementedError:\r\n+\t\tlog.debugWarning(\"Unable to make reader\", exc_info=True)\r\n+\t\treturn\r\n \t_activeSayAll = weakref.ref(reader)\r\n \treader.nextLine()\r\n \r\n@@ -110,8 +114,8 @@\n \t\tif cursor == CURSOR_CARET:\r\n \t\t\ttry:\r\n \t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n-\t\t\texcept (NotImplementedError, RuntimeError):\r\n-\t\t\t\treturn\r\n+\t\t\texcept (NotImplementedError, RuntimeError) as e:\r\n+\t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n \t\telse:\r\n \t\t\tself.reader = api.getReviewPosition()\r\n \t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\n", "issue": "Say all on Desktop raises an error\n### Steps to reproduce:\r\n1. Focus the desktop.\r\n2. Invoke caret say all\r\n\r\n### Actual behavior:\r\nThe following error is raised:\r\n\r\n```\r\nERROR - scriptHandler.executeScript (15:54:57.769):\r\nerror executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'\r\nTraceback (most recent call last):\r\n File \"scriptHandler.pyc\", line 190, in executeScript\r\n File \"globalCommands.pyc\", line 1334, in script_sayAll\r\n File \"sayAllHandler.pyc\", line 79, in readText\r\n File \"sayAllHandler.pyc\", line 119, in nextLine\r\nAttributeError: '_TextReader' object has no attribute 'reader'\r\nERROR - stderr (15:54:57.779):\r\nException ignored in:\r\nERROR - stderr (15:54:57.790):\r\n<function _TextReader.__del__ at 0x0462F390>\r\nERROR - stderr (15:54:57.803):\r\nTraceback (most recent call last):\r\nERROR - stderr (15:54:57.815):\r\n File \"sayAllHandler.pyc\", line 213, in __del__\r\nERROR - stderr (15:54:57.827):\r\n File \"sayAllHandler.pyc\", line 206, in stop\r\nERROR - stderr (15:54:57.839):\r\nAttributeError\r\nERROR - stderr (15:54:57.851):\r\n:\r\nERROR - stderr (15:54:57.863):\r\n'_TextReader' object has no attribute 'reader'\r\n```\r\n\r\n### Expected behavior:\r\nNO error\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n\r\n#### NVDA version:\r\nthreshold-18069\r\n\r\n#### Windows version:\r\nWindows 10 1903 build 18362.239\r\n\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2017 NV Access Limited\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\nimport weakref\r\nimport speech\r\nimport synthDriverHandler\r\nfrom logHandler import log\r\nimport config\r\nimport controlTypes\r\nimport api\r\nimport textInfos\r\nimport queueHandler\r\nimport winKernel\r\n\r\nCURSOR_CARET = 0\r\nCURSOR_REVIEW = 1\r\n\r\nlastSayAllMode = None\r\n#: The active say all manager.\r\n#: This is a weakref because the manager should be allowed to die once say all is complete.\r\n_activeSayAll = lambda: None # Return None when called like a dead weakref.\r\n\r\ndef stop():\r\n\tactive = _activeSayAll()\r\n\tif active:\r\n\t\tactive.stop()\r\n\r\ndef isRunning():\r\n\t\"\"\"Determine whether say all is currently 
running.\r\n\t@return: C{True} if say all is currently running, C{False} if not.\r\n\t@rtype: bool\r\n\t\"\"\"\r\n\treturn bool(_activeSayAll())\r\n\r\ndef readObjects(obj):\r\n\tglobal _activeSayAll\r\n\treader = _ObjectsReader(obj)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.next()\r\n\r\nclass _ObjectsReader(object):\r\n\r\n\tdef __init__(self, root):\r\n\t\tself.walker = self.walk(root)\r\n\t\tself.prevObj = None\r\n\r\n\tdef walk(self, obj):\r\n\t\tyield obj\r\n\t\tchild=obj.simpleFirstChild\r\n\t\twhile child:\r\n\t\t\tfor descendant in self.walk(child):\r\n\t\t\t\tyield descendant\r\n\t\t\tchild=child.simpleNext\r\n\r\n\tdef next(self):\r\n\t\tif not self.walker:\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif self.prevObj:\r\n\t\t\t# We just started speaking this object, so move the navigator to it.\r\n\t\t\tapi.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)\r\n\t\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\t# Move onto the next object.\r\n\t\tself.prevObj = obj = next(self.walker, None)\r\n\t\tif not obj:\r\n\t\t\treturn\r\n\t\t# Call this method again when we start speaking this object.\r\n\t\tcallbackCommand = speech.CallbackCommand(self.next, name=\"say-all:next\")\r\n\t\tspeech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)\r\n\r\n\tdef stop(self):\r\n\t\tself.walker = None\r\n\r\ndef readText(cursor):\r\n\tglobal lastSayAllMode, _activeSayAll\r\n\tlastSayAllMode=cursor\r\n\treader = _TextReader(cursor)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.nextLine()\r\n\r\nclass _TextReader(object):\r\n\t\"\"\"Manages continuous reading of text.\r\n\tThis is intended for internal use only.\r\n\r\n\tThe high level flow of control is as follows:\r\n\t1. The constructor sets things up.\r\n\t2. L{nextLine} is called to read the first line.\r\n\t3. When it speaks a line, L{nextLine} request that L{lineReached} be called\r\n\t\twhen we start speaking this line, providing the position and state at this point.\r\n\t4. When we start speaking a line, L{lineReached} is called\r\n\t\tand moves the cursor to that line.\r\n\t5. L{lineReached} calls L{nextLine}.\r\n\t6. If there are more lines, L{nextLine} works as per steps 3 and 4.\r\n\t7. Otherwise, if the object doesn't support page turns, we're finished.\r\n\t8. If the object does support page turns,\r\n\t\twe request that L{turnPage} be called when speech is finished.\r\n\t9. L{turnPage} tries to turn the page.\r\n\t10. If there are no more pages, we're finished.\r\n\t11. 
If there is another page, L{turnPage} calls L{nextLine}.\r\n\t\"\"\"\r\n\tMAX_BUFFERED_LINES = 10\r\n\r\n\tdef __init__(self, cursor):\r\n\t\tself.cursor = cursor\r\n\t\tself.trigger = SayAllProfileTrigger()\r\n\t\tself.trigger.enter()\r\n\t\t# Start at the cursor.\r\n\t\tif cursor == CURSOR_CARET:\r\n\t\t\ttry:\r\n\t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n\t\t\texcept (NotImplementedError, RuntimeError):\r\n\t\t\t\treturn\r\n\t\telse:\r\n\t\t\tself.reader = api.getReviewPosition()\r\n\t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n\t\tself.numBufferedLines = 0\r\n\r\n\tdef nextLine(self):\r\n\t\tif not self.reader:\r\n\t\t\tlog.debug(\"no self.reader\")\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif not self.reader.obj:\r\n\t\t\tlog.debug(\"no self.reader.obj\")\r\n\t\t\t# The object died, so we should too.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tbookmark = self.reader.bookmark\r\n\t\t# Expand to the current line.\r\n\t\t# We use move end rather than expand\r\n\t\t# because the user might start in the middle of a line\r\n\t\t# and we don't want to read from the start of the line in that case.\r\n\t\t# For lines after the first, it's also more efficient because\r\n\t\t# we're already at the start of the line, so there's no need to search backwards.\r\n\t\tdelta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint=\"end\")\r\n\t\tif delta <= 0:\r\n\t\t\t# No more text.\r\n\t\t\tif isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):\r\n\t\t\t\t# Once the last line finishes reading, try turning the page.\r\n\t\t\t\tcb = speech.CallbackCommand(self.turnPage, name=\"say-all:turnPage\")\r\n\t\t\t\tspeech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])\r\n\t\t\telse:\r\n\t\t\t\tself.finish()\r\n\t\t\treturn\r\n\r\n\t\t# Copy the speakTextInfoState so that speak callbackCommand\r\n\t\t# and its associated callback are using a copy isolated to this specific line.\r\n\t\tstate = self.speakTextInfoState.copy()\r\n\t\t# Call lineReached when we start speaking this line.\r\n\t\t# lineReached will move the cursor and trigger reading of the next line.\r\n\r\n\t\tdef _onLineReached(obj=self.reader.obj, state=state):\r\n\t\t\tself.lineReached(obj, bookmark, state)\r\n\r\n\t\tcb = speech.CallbackCommand(\r\n\t\t\t_onLineReached,\r\n\t\t\tname=\"say-all:lineReached\"\r\n\t\t)\r\n\r\n\t\t# Generate the speech sequence for the reader textInfo\r\n\t\t# and insert the lineReached callback at the very beginning of the sequence.\r\n\t\t# _linePrefix on speakTextInfo cannot be used here\r\n\t\t# As it would be inserted in the sequence after all initial control starts which is too late.\r\n\t\tspeechGen = speech.getTextInfoSpeech(\r\n\t\t\tself.reader,\r\n\t\t\tunit=textInfos.UNIT_READINGCHUNK,\r\n\t\t\treason=controlTypes.REASON_SAYALL,\r\n\t\t\tuseCache=state\r\n\t\t)\r\n\t\tseq = list(speech._flattenNestedSequences(speechGen))\r\n\t\tseq.insert(0, cb)\r\n\t\t# Speak the speech sequence.\r\n\t\tspoke = speech.speakWithoutPauses(seq)\r\n\t\t# Update the textInfo state ready for when speaking the next line.\r\n\t\tself.speakTextInfoState = state.copy()\r\n\r\n\t\t# Collapse to the end of this line, ready to read the next.\r\n\t\ttry:\r\n\t\t\tself.reader.collapse(end=True)\r\n\t\texcept RuntimeError:\r\n\t\t\t# This occurs in Microsoft Word when the range covers the end of the document.\r\n\t\t\t# without this exception to indicate that further collapsing is not possible, say all could enter an infinite 
loop.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tif not spoke:\r\n\t\t\t# This line didn't include a natural pause, so nothing was spoken.\r\n\t\t\tself.numBufferedLines += 1\r\n\t\t\tif self.numBufferedLines < self.MAX_BUFFERED_LINES:\r\n\t\t\t\t# Move on to the next line.\r\n\t\t\t\t# We queue this to allow the user a chance to stop say all.\r\n\t\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)\r\n\t\t\telse:\r\n\t\t\t\t# We don't want to buffer too much.\r\n\t\t\t\t# Force speech. lineReached will resume things when speech catches up.\r\n\t\t\t\tspeech.speakWithoutPauses(None)\r\n\t\t\t\t# The first buffered line has now started speaking.\r\n\t\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef lineReached(self, obj, bookmark, state):\r\n\t\t# We've just started speaking this line, so move the cursor there.\r\n\t\tstate.updateObj()\r\n\t\tupdater = obj.makeTextInfo(bookmark)\r\n\t\tif self.cursor == CURSOR_CARET:\r\n\t\t\tupdater.updateCaret()\r\n\t\tif self.cursor != CURSOR_CARET or config.conf[\"reviewCursor\"][\"followCaret\"]:\r\n\t\t\tapi.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)\r\n\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\tif self.numBufferedLines == 0:\r\n\t\t\t# This was the last line spoken, so move on.\r\n\t\t\tself.nextLine()\r\n\t\telse:\r\n\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef turnPage(self):\r\n\t\ttry:\r\n\t\t\tself.reader.obj.turnPage()\r\n\t\texcept RuntimeError:\r\n\t\t\tlog.debug(\"No more pages\")\r\n\t\t\t# No more pages.\r\n\t\t\tself.stop()\r\n\t\t\treturn\r\n\t\tself.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)\r\n\t\tself.nextLine()\r\n\r\n\tdef finish(self):\r\n\t\t# There is no more text.\r\n\t\t# Call stop to clean up, but only after speech completely finishes.\r\n\t\t# Otherwise, if a different synth is being used for say all,\r\n\t\t# we might switch synths too early and truncate the final speech.\r\n\t\t# We do this by putting a CallbackCommand at the start of a new utterance.\r\n\t\tcb = speech.CallbackCommand(self.stop, name=\"say-all:stop\")\r\n\t\tspeech.speakWithoutPauses([\r\n\t\t\tspeech.EndUtteranceCommand(),\r\n\t\t\tcb,\r\n\t\t\tspeech.EndUtteranceCommand()\r\n\t\t])\r\n\r\n\tdef stop(self):\r\n\t\tif not self.reader:\r\n\t\t\treturn\r\n\t\tself.reader = None\r\n\t\tself.trigger.exit()\r\n\t\tself.trigger = None\r\n\r\n\tdef __del__(self):\r\n\t\tself.stop()\r\n\r\nclass SayAllProfileTrigger(config.ProfileTrigger):\r\n\t\"\"\"A configuration profile trigger for when say all is in progress.\r\n\t\"\"\"\r\n\tspec = \"sayAll\"\r\n", "path": "source/sayAllHandler.py"}]}
| 3,947 | 253 |
gh_patches_debug_34969 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2525 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature Request: !Sub equivalent of W1001
*cfn-lint version: 0.58.2*
I believe that `!Sub` parameters should be checked to see if they depend on conditional resources the same way W1001 checks this for `!Ref` (e.g. `SubCondParam.Value` should produce a warning). I suspect this is possible since E1019 checks for non-existent parameters within a `!Sub`.
```
---
AWSTemplateFormatVersion: 2010-09-09
Conditions:
CreateContainerRepository: !Equals ["yes", "no"]
Resources:
Repository:
Type: AWS::ECR::Repository
Condition: CreateContainerRepository
SubCondParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Sub ${Repository}
RefCondParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Ref Repository
SubFakeParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Sub ${Fake}
RefFakeParam:
Type: AWS::SSM::Parameter
Properties:
Type: String
Value: !Ref Fake
```
* SubCondParam.Value shows no error or warning, I believe it should show a warning
* RefCondParam.Value shows W1001
* SubFakeParam.Value shows E1019
* RefFakeParam.Value shows E1012
</issue>
<code>
[start of src/cfnlint/rules/functions/RelationshipConditions.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.helpers import PSEUDOPARAMS
6 from cfnlint.rules import CloudFormationLintRule, RuleMatch
7
8
9 class RelationshipConditions(CloudFormationLintRule):
10 """Check if Ref/GetAtt values are available via conditions"""
11
12 id = "W1001"
13 shortdesc = "Ref/GetAtt to resource that is available when conditions are applied"
14 description = (
15 "Check the Conditions that affect a Ref/GetAtt to make sure "
16 "the resource being related to is available when there is a resource "
17 "condition."
18 )
19 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html"
20 tags = ["conditions", "resources", "relationships", "ref", "getatt"]
21
22 def match(self, cfn):
23 """Check CloudFormation Ref/GetAtt for Conditions"""
24
25 matches = []
26
27 # Start with Ref checks
28 ref_objs = cfn.search_deep_keys(searchText="Ref", includeGlobals=False)
29 for ref_obj in ref_objs:
30 value = ref_obj[-1]
31 if value not in PSEUDOPARAMS:
32 scenarios = cfn.is_resource_available(ref_obj, value)
33 for scenario in scenarios:
34 # pylint: disable=consider-using-f-string
35 scenario_text = " and ".join(
36 [
37 'when condition "%s" is %s' % (k, v)
38 for (k, v) in scenario.items()
39 ]
40 )
41 message = (
42 'Ref to resource "{0}" that may not be available {1} at {2}'
43 )
44 matches.append(
45 RuleMatch(
46 ref_obj[:-1],
47 message.format(
48 value, scenario_text, "/".join(map(str, ref_obj[:-1]))
49 ),
50 )
51 )
52
53 # The do GetAtt
54 getatt_objs = cfn.search_deep_keys(
55 searchText="Fn::GetAtt", includeGlobals=False
56 )
57 for getatt_obj in getatt_objs:
58 value_obj = getatt_obj[-1]
59 value = None
60 if isinstance(value_obj, list):
61 value = value_obj[0]
62 elif isinstance(value_obj, str):
63 value = value_obj.split(".")[0]
64 if value:
65 if value not in PSEUDOPARAMS:
66 scenarios = cfn.is_resource_available(getatt_obj, value)
67 for scenario in scenarios:
68 scenario_text = " and ".join(
69 [
70 f'when condition "{k}" is {v}'
71 for (k, v) in scenario.items()
72 ]
73 )
74 message = 'GetAtt to resource "{0}" that may not be available {1} at {2}'
75 matches.append(
76 RuleMatch(
77 getatt_obj[:-1],
78 message.format(
79 value,
80 scenario_text,
81 "/".join(map(str, getatt_obj[:-1])),
82 ),
83 )
84 )
85
86 return matches
87
[end of src/cfnlint/rules/functions/RelationshipConditions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/functions/RelationshipConditions.py b/src/cfnlint/rules/functions/RelationshipConditions.py
--- a/src/cfnlint/rules/functions/RelationshipConditions.py
+++ b/src/cfnlint/rules/functions/RelationshipConditions.py
@@ -17,7 +17,7 @@
"condition."
)
source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html"
- tags = ["conditions", "resources", "relationships", "ref", "getatt"]
+ tags = ["conditions", "resources", "relationships", "ref", "getatt", "sub"]
def match(self, cfn):
"""Check CloudFormation Ref/GetAtt for Conditions"""
@@ -83,4 +83,46 @@
)
)
+ # The do Sub
+ sub_objs = cfn.search_deep_keys(searchText="Fn::Sub", includeGlobals=False)
+ for sub_obj in sub_objs:
+ sub_string = sub_obj[-1]
+ # Filter out bad types of sub_strings.
+ # Lists have two be two items and it can be just a string
+ if not isinstance(sub_string, (list, str)):
+ continue
+ if isinstance(sub_string, str):
+ sub_string = [sub_string, {}]
+ if len(sub_string) != 2:
+ continue
+ sub_params = sub_string[1]
+ string_params = cfn.get_sub_parameters(sub_string[0])
+
+ for string_param in string_params:
+ if string_param not in sub_params:
+ # deal with GetAtts by dropping everything after the .
+ string_param = string_param.split(".")[0]
+ if string_param in cfn.template.get("Resources", {}):
+ scenarios = cfn.is_resource_available(
+ sub_obj[:-1], string_param
+ )
+ for scenario in scenarios:
+ scenario_text = " and ".join(
+ [
+ f'when condition "{k}" is {v}'
+ for (k, v) in scenario.items()
+ ]
+ )
+ message = 'Fn::Sub to resource "{0}" that may not be available {1} at {2}'
+ matches.append(
+ RuleMatch(
+ sub_obj[:-1],
+ message.format(
+ string_param,
+ scenario_text,
+ "/".join(map(str, sub_obj[:-1])),
+ ),
+ )
+ )
+
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/RelationshipConditions.py b/src/cfnlint/rules/functions/RelationshipConditions.py\n--- a/src/cfnlint/rules/functions/RelationshipConditions.py\n+++ b/src/cfnlint/rules/functions/RelationshipConditions.py\n@@ -17,7 +17,7 @@\n \"condition.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html\"\n- tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\"]\n+ tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\", \"sub\"]\n \n def match(self, cfn):\n \"\"\"Check CloudFormation Ref/GetAtt for Conditions\"\"\"\n@@ -83,4 +83,46 @@\n )\n )\n \n+ # The do Sub\n+ sub_objs = cfn.search_deep_keys(searchText=\"Fn::Sub\", includeGlobals=False)\n+ for sub_obj in sub_objs:\n+ sub_string = sub_obj[-1]\n+ # Filter out bad types of sub_strings.\n+ # Lists have two be two items and it can be just a string\n+ if not isinstance(sub_string, (list, str)):\n+ continue\n+ if isinstance(sub_string, str):\n+ sub_string = [sub_string, {}]\n+ if len(sub_string) != 2:\n+ continue\n+ sub_params = sub_string[1]\n+ string_params = cfn.get_sub_parameters(sub_string[0])\n+\n+ for string_param in string_params:\n+ if string_param not in sub_params:\n+ # deal with GetAtts by dropping everything after the .\n+ string_param = string_param.split(\".\")[0]\n+ if string_param in cfn.template.get(\"Resources\", {}):\n+ scenarios = cfn.is_resource_available(\n+ sub_obj[:-1], string_param\n+ )\n+ for scenario in scenarios:\n+ scenario_text = \" and \".join(\n+ [\n+ f'when condition \"{k}\" is {v}'\n+ for (k, v) in scenario.items()\n+ ]\n+ )\n+ message = 'Fn::Sub to resource \"{0}\" that may not be available {1} at {2}'\n+ matches.append(\n+ RuleMatch(\n+ sub_obj[:-1],\n+ message.format(\n+ string_param,\n+ scenario_text,\n+ \"/\".join(map(str, sub_obj[:-1])),\n+ ),\n+ )\n+ )\n+\n return matches\n", "issue": "Feature Request: !Sub equivalent of W1001\n*cfn-lint version: 0.58.2*\r\n\r\nI believe that `!Sub` parameters should be checked to see if they depend on conditional resources the same way W1001 checks this for `!Ref` (e.g. `SubCondParam.Value` should produce a warning). I suspect this is possible since E1019 checks for non-existent parameters within a `!Sub`.\r\n\r\n```\r\n---\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nConditions:\r\n CreateContainerRepository: !Equals [\"yes\", \"no\"]\r\n\r\nResources:\r\n Repository:\r\n Type: AWS::ECR::Repository\r\n Condition: CreateContainerRepository\r\n\r\n SubCondParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Sub ${Repository}\r\n\r\n RefCondParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Ref Repository\r\n\r\n SubFakeParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Sub ${Fake}\r\n\r\n RefFakeParam:\r\n Type: AWS::SSM::Parameter\r\n Properties:\r\n Type: String\r\n Value: !Ref Fake\r\n```\r\n\r\n* SubCondParam.Value shows no error or warning, I believe it should show a warning\r\n* RefCondParam.Value shows W1001\r\n* SubFakeParam.Value shows E1019\r\n* RefFakeParam.Value shows E1012\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.helpers import PSEUDOPARAMS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RelationshipConditions(CloudFormationLintRule):\n \"\"\"Check if Ref/GetAtt values are available via conditions\"\"\"\n\n id = \"W1001\"\n shortdesc = \"Ref/GetAtt to resource that is available when conditions are applied\"\n description = (\n \"Check the Conditions that affect a Ref/GetAtt to make sure \"\n \"the resource being related to is available when there is a resource \"\n \"condition.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html\"\n tags = [\"conditions\", \"resources\", \"relationships\", \"ref\", \"getatt\"]\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Ref/GetAtt for Conditions\"\"\"\n\n matches = []\n\n # Start with Ref checks\n ref_objs = cfn.search_deep_keys(searchText=\"Ref\", includeGlobals=False)\n for ref_obj in ref_objs:\n value = ref_obj[-1]\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(ref_obj, value)\n for scenario in scenarios:\n # pylint: disable=consider-using-f-string\n scenario_text = \" and \".join(\n [\n 'when condition \"%s\" is %s' % (k, v)\n for (k, v) in scenario.items()\n ]\n )\n message = (\n 'Ref to resource \"{0}\" that may not be available {1} at {2}'\n )\n matches.append(\n RuleMatch(\n ref_obj[:-1],\n message.format(\n value, scenario_text, \"/\".join(map(str, ref_obj[:-1]))\n ),\n )\n )\n\n # The do GetAtt\n getatt_objs = cfn.search_deep_keys(\n searchText=\"Fn::GetAtt\", includeGlobals=False\n )\n for getatt_obj in getatt_objs:\n value_obj = getatt_obj[-1]\n value = None\n if isinstance(value_obj, list):\n value = value_obj[0]\n elif isinstance(value_obj, str):\n value = value_obj.split(\".\")[0]\n if value:\n if value not in PSEUDOPARAMS:\n scenarios = cfn.is_resource_available(getatt_obj, value)\n for scenario in scenarios:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in scenario.items()\n ]\n )\n message = 'GetAtt to resource \"{0}\" that may not be available {1} at {2}'\n matches.append(\n RuleMatch(\n getatt_obj[:-1],\n message.format(\n value,\n scenario_text,\n \"/\".join(map(str, getatt_obj[:-1])),\n ),\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/functions/RelationshipConditions.py"}]}
| 1,698 | 548 |
gh_patches_debug_16419 | rasdani/github-patches | git_diff | pantsbuild__pants-16001 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`./pants run` crashes when `remote_cache_eager_fetch = false`
**Describe the bug**
After setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:
```
Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }
```
**Pants version**
`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`
**OS**
Linux
**Additional info**
Some BuildSense links where we've seen the failure:
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/
* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/
</issue>
<code>
[start of src/python/pants/base/exceptions.py]
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6
7 class TargetDefinitionException(Exception):
8 """Indicates an invalid target definition.
9
10 :API: public
11 """
12
13 def __init__(self, target, msg):
14 """
15 :param target: the target in question
16 :param string msg: a description of the target misconfiguration
17 """
18 super().__init__(f"Invalid target {target}: {msg}")
19
20
21 class BuildConfigurationError(Exception):
22 """Indicates an error in a pants installation's configuration."""
23
24
25 class BackendConfigurationError(BuildConfigurationError):
26 """Indicates a plugin backend with a missing or malformed register module."""
27
28
29 class MappingError(Exception):
30 """Indicates an error mapping addressable objects."""
31
[end of src/python/pants/base/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py
--- a/src/python/pants/base/exceptions.py
+++ b/src/python/pants/base/exceptions.py
@@ -3,6 +3,11 @@
from __future__ import annotations
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pants.engine.internals.native_engine import PyFailure
+
class TargetDefinitionException(Exception):
"""Indicates an invalid target definition.
@@ -28,3 +33,15 @@
class MappingError(Exception):
"""Indicates an error mapping addressable objects."""
+
+
+class NativeEngineFailure(Exception):
+ """A wrapper around a `Failure` instance.
+
+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with
+ additional fields. See https://github.com/PyO3/pyo3/issues/295
+ """
+
+ def __init__(self, msg: str, failure: PyFailure) -> None:
+ super().__init__(msg)
+ self.failure = failure
|
{"golden_diff": "diff --git a/src/python/pants/base/exceptions.py b/src/python/pants/base/exceptions.py\n--- a/src/python/pants/base/exceptions.py\n+++ b/src/python/pants/base/exceptions.py\n@@ -3,6 +3,11 @@\n \n from __future__ import annotations\n \n+from typing import TYPE_CHECKING\n+\n+if TYPE_CHECKING:\n+ from pants.engine.internals.native_engine import PyFailure\n+\n \n class TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n@@ -28,3 +33,15 @@\n \n class MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n+\n+\n+class NativeEngineFailure(Exception):\n+ \"\"\"A wrapper around a `Failure` instance.\n+\n+ TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with\n+ additional fields. See https://github.com/PyO3/pyo3/issues/295\n+ \"\"\"\n+\n+ def __init__(self, msg: str, failure: PyFailure) -> None:\n+ super().__init__(msg)\n+ self.failure = failure\n", "issue": "`./pants run` crashes when `remote_cache_eager_fetch = false`\n**Describe the bug**\r\n\r\nAfter setting `remote_cache_eager_fetch = false`, a CI step that `./pants run`s a `pex_binary` has started (intermittently) failing with errors like:\r\n```\r\n Exception: Was not present in either the local or remote store: Digest { hash: Fingerprint<46683dec8706b7ac7c4f6011f68b4b8c10ad423ae8ba57745a6f5e01ba5b2f7b>, size_bytes: 11827 }\r\n```\r\n\r\n**Pants version**\r\n\r\n`PANTS_SHA=5d8a328d72209863986c8959b20305505bc068ba`\r\n\r\n**OS**\r\n\r\nLinux\r\n\r\n**Additional info**\r\n\r\nSome BuildSense links where we've seen the failure:\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_31_09_277_3793f53b54914135885f2ac951faf210/\r\n* https://app.toolchain.com/organizations/color/repos/color/builds/pants_run_2022_06_27_19_29_46_827_37a3a5f2d8e440cca373a7fb4a5d3b51/\r\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\n\nclass TargetDefinitionException(Exception):\n \"\"\"Indicates an invalid target definition.\n\n :API: public\n \"\"\"\n\n def __init__(self, target, msg):\n \"\"\"\n :param target: the target in question\n :param string msg: a description of the target misconfiguration\n \"\"\"\n super().__init__(f\"Invalid target {target}: {msg}\")\n\n\nclass BuildConfigurationError(Exception):\n \"\"\"Indicates an error in a pants installation's configuration.\"\"\"\n\n\nclass BackendConfigurationError(BuildConfigurationError):\n \"\"\"Indicates a plugin backend with a missing or malformed register module.\"\"\"\n\n\nclass MappingError(Exception):\n \"\"\"Indicates an error mapping addressable objects.\"\"\"\n", "path": "src/python/pants/base/exceptions.py"}]}
| 1,221 | 243 |
gh_patches_debug_40698 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-761 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Latest requirements.txt seems to be over-specifying dependencies
This is the latest version requirements.txt file:
```
black>=19.3b0
darglint
hypothesis>=4.4.0
interrogate
ipykernel
isort>=4.3.18
jupyter_client
lxml
natsort
nbsphinx>=0.4.2
pandas-flavor
pandas-vet
pre-commit
pyspark
pytest-azurepipelines
pytest-cov
pytest>=3.4.2
scikit-learn
seaborn
setuptools>=38.5.2
sphinxcontrib-fulltoc==1.2.0
unyt
xarray
```
Those are injected in the setup.py as mandatory dependencies, but a lot in there looks like docs, dev, or optional. Does pyjanitor really require all that at run time?
</issue>
<code>
[start of setup.py]
1 """Setup script."""
2
3 import re
4 from pathlib import Path
5
6 from setuptools import find_packages, setup
7
8
9 def requirements():
10 """Reader for requirements.txt"""
11 with open("requirements.txt", "r+") as f:
12 return f.read()
13
14
15 def generate_long_description() -> str:
16 """
17 Extra chunks from README for PyPI description.
18
19 Target chunks must be contained within `.. pypi-doc` pair comments,
20 so there must be an even number of comments in README.
21
22 :returns: Extracted description from README.
23 :raises Exception: if odd number of ``.. pypi-doc`` comments
24 in README.
25 """
26 # Read the contents of README file
27 this_directory = Path(__file__).parent
28 with open(this_directory / "README.rst", encoding="utf-8") as f:
29 readme = f.read()
30
31 # Find pypi-doc comments in README
32 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
33 if len(indices) % 2 != 0:
34 raise Exception("Odd number of `.. pypi-doc` comments in README")
35
36 # Loop through pairs of comments and save text between pairs
37 long_description = ""
38 for i in range(0, len(indices), 2):
39 start_index = indices[i] + 11
40 end_index = indices[i + 1]
41 long_description += readme[start_index:end_index]
42 return long_description
43
44
45 extra_spark = ["pyspark"]
46 extra_biology = ["biopython"]
47 extra_chemistry = ["rdkit"]
48 extra_engineering = ["unyt"]
49 extra_all = extra_biology + extra_engineering + extra_spark
50
51 setup(
52 name="pyjanitor",
53 version="0.20.10",
54 description="Tools for cleaning pandas DataFrames",
55 author="Eric J. Ma",
56 author_email="[email protected]",
57 url="https://github.com/ericmjl/pyjanitor",
58 license="MIT",
59 # packages=["janitor", "janitor.xarray", "janitor.spark"],
60 packages=find_packages(),
61 install_requires=requirements(),
62 extras_require={
63 "all": extra_all,
64 "biology": extra_biology,
65 # "chemistry": extra_chemistry, should be inserted once rdkit
66 # fixes https://github.com/rdkit/rdkit/issues/1812
67 "engineering": extra_engineering,
68 "spark": extra_spark,
69 },
70 python_requires=">=3.6",
71 long_description=generate_long_description(),
72 long_description_content_type="text/x-rst",
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,15 +1,82 @@
"""Setup script."""
-
+import codecs
+import os
import re
from pathlib import Path
+from pprint import pprint
from setuptools import find_packages, setup
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+def read(*parts):
+ # intentionally *not* adding an encoding option to open
+ return codecs.open(os.path.join(HERE, *parts), "r").read()
+
-def requirements():
- """Reader for requirements.txt"""
- with open("requirements.txt", "r+") as f:
- return f.read()
+def read_requirements(*parts):
+ """
+ Return requirements from parts.
+
+ Given a requirements.txt (or similar style file),
+ returns a list of requirements.
+ Assumes anything after a single '#' on a line is a comment, and ignores
+ empty lines.
+
+ :param parts: list of filenames which contain the installation "parts",
+ i.e. submodule-specific installation requirements
+ :returns: A compiled list of requirements.
+ """
+ requirements = []
+ for line in read(*parts).splitlines():
+ new_line = re.sub( # noqa: PD005
+ r"(\s*)?#.*$", # the space immediately before the
+ # hash mark, the hash mark, and
+ # anything that follows it
+ "", # replace with a blank string
+ line,
+ )
+ new_line = re.sub( # noqa: PD005
+ r"-r.*$", # link to another requirement file
+ "", # replace with a blank string
+ new_line,
+ )
+ new_line = re.sub( # noqa: PD005
+ r"-e \..*$", # link to editable install
+ "", # replace with a blank string
+ new_line,
+ )
+ # print(line, "-->", new_line)
+ if new_line: # i.e. we have a non-zero-length string
+ requirements.append(new_line)
+ return requirements
+
+
+# pull from requirements.IN, requirements.TXT is generated from this
+INSTALL_REQUIRES = read_requirements(".requirements/base.in")
+
+EXTRA_REQUIRES = {
+ "dev": read_requirements(".requirements/dev.in"),
+ "docs": read_requirements(".requirements/docs.in"),
+ "test": read_requirements(".requirements/testing.in"),
+ "biology": read_requirements(".requirements/biology.in"),
+ "chemistry": read_requirements(".requirements/chemistry.in"),
+ "engineering": read_requirements(".requirements/engineering.in"),
+ "spark": read_requirements(".requirements/spark.in"),
+}
+
+# add 'all' key to EXTRA_REQUIRES
+all_requires = []
+for k, v in EXTRA_REQUIRES.items():
+ all_requires.extend(v)
+EXTRA_REQUIRES["all"] = set(all_requires)
+
+for k1 in ["biology", "chemistry", "engineering", "spark"]:
+ for v2 in EXTRA_REQUIRES[k1]:
+ EXTRA_REQUIRES["docs"].append(v2)
+
+pprint(EXTRA_REQUIRES)
def generate_long_description() -> str:
@@ -42,12 +109,6 @@
return long_description
-extra_spark = ["pyspark"]
-extra_biology = ["biopython"]
-extra_chemistry = ["rdkit"]
-extra_engineering = ["unyt"]
-extra_all = extra_biology + extra_engineering + extra_spark
-
setup(
name="pyjanitor",
version="0.20.10",
@@ -58,15 +119,8 @@
license="MIT",
# packages=["janitor", "janitor.xarray", "janitor.spark"],
packages=find_packages(),
- install_requires=requirements(),
- extras_require={
- "all": extra_all,
- "biology": extra_biology,
- # "chemistry": extra_chemistry, should be inserted once rdkit
- # fixes https://github.com/rdkit/rdkit/issues/1812
- "engineering": extra_engineering,
- "spark": extra_spark,
- },
+ install_requires=INSTALL_REQUIRES,
+ extras_require=EXTRA_REQUIRES,
python_requires=">=3.6",
long_description=generate_long_description(),
long_description_content_type="text/x-rst",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,15 +1,82 @@\n \"\"\"Setup script.\"\"\"\n-\n+import codecs\n+import os\n import re\n from pathlib import Path\n+from pprint import pprint\n \n from setuptools import find_packages, setup\n \n+HERE = os.path.abspath(os.path.dirname(__file__))\n+\n+\n+def read(*parts):\n+ # intentionally *not* adding an encoding option to open\n+ return codecs.open(os.path.join(HERE, *parts), \"r\").read()\n+\n \n-def requirements():\n- \"\"\"Reader for requirements.txt\"\"\"\n- with open(\"requirements.txt\", \"r+\") as f:\n- return f.read()\n+def read_requirements(*parts):\n+ \"\"\"\n+ Return requirements from parts.\n+\n+ Given a requirements.txt (or similar style file),\n+ returns a list of requirements.\n+ Assumes anything after a single '#' on a line is a comment, and ignores\n+ empty lines.\n+\n+ :param parts: list of filenames which contain the installation \"parts\",\n+ i.e. submodule-specific installation requirements\n+ :returns: A compiled list of requirements.\n+ \"\"\"\n+ requirements = []\n+ for line in read(*parts).splitlines():\n+ new_line = re.sub( # noqa: PD005\n+ r\"(\\s*)?#.*$\", # the space immediately before the\n+ # hash mark, the hash mark, and\n+ # anything that follows it\n+ \"\", # replace with a blank string\n+ line,\n+ )\n+ new_line = re.sub( # noqa: PD005\n+ r\"-r.*$\", # link to another requirement file\n+ \"\", # replace with a blank string\n+ new_line,\n+ )\n+ new_line = re.sub( # noqa: PD005\n+ r\"-e \\..*$\", # link to editable install\n+ \"\", # replace with a blank string\n+ new_line,\n+ )\n+ # print(line, \"-->\", new_line)\n+ if new_line: # i.e. we have a non-zero-length string\n+ requirements.append(new_line)\n+ return requirements\n+\n+\n+# pull from requirements.IN, requirements.TXT is generated from this\n+INSTALL_REQUIRES = read_requirements(\".requirements/base.in\")\n+\n+EXTRA_REQUIRES = {\n+ \"dev\": read_requirements(\".requirements/dev.in\"),\n+ \"docs\": read_requirements(\".requirements/docs.in\"),\n+ \"test\": read_requirements(\".requirements/testing.in\"),\n+ \"biology\": read_requirements(\".requirements/biology.in\"),\n+ \"chemistry\": read_requirements(\".requirements/chemistry.in\"),\n+ \"engineering\": read_requirements(\".requirements/engineering.in\"),\n+ \"spark\": read_requirements(\".requirements/spark.in\"),\n+}\n+\n+# add 'all' key to EXTRA_REQUIRES\n+all_requires = []\n+for k, v in EXTRA_REQUIRES.items():\n+ all_requires.extend(v)\n+EXTRA_REQUIRES[\"all\"] = set(all_requires)\n+\n+for k1 in [\"biology\", \"chemistry\", \"engineering\", \"spark\"]:\n+ for v2 in EXTRA_REQUIRES[k1]:\n+ EXTRA_REQUIRES[\"docs\"].append(v2)\n+\n+pprint(EXTRA_REQUIRES)\n \n \n def generate_long_description() -> str:\n@@ -42,12 +109,6 @@\n return long_description\n \n \n-extra_spark = [\"pyspark\"]\n-extra_biology = [\"biopython\"]\n-extra_chemistry = [\"rdkit\"]\n-extra_engineering = [\"unyt\"]\n-extra_all = extra_biology + extra_engineering + extra_spark\n-\n setup(\n name=\"pyjanitor\",\n version=\"0.20.10\",\n@@ -58,15 +119,8 @@\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n- install_requires=requirements(),\n- extras_require={\n- \"all\": extra_all,\n- \"biology\": extra_biology,\n- # \"chemistry\": extra_chemistry, should be inserted once rdkit\n- # fixes https://github.com/rdkit/rdkit/issues/1812\n- \"engineering\": extra_engineering,\n- \"spark\": extra_spark,\n- },\n+ 
install_requires=INSTALL_REQUIRES,\n+ extras_require=EXTRA_REQUIRES,\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n", "issue": "Latest requirements.txt seems to be over-specifying dependencies\nThis is the latest version requirements.txt file:\r\n\r\n```\r\nblack>=19.3b0\r\ndarglint\r\nhypothesis>=4.4.0\r\ninterrogate\r\nipykernel\r\nisort>=4.3.18\r\njupyter_client\r\nlxml\r\nnatsort\r\nnbsphinx>=0.4.2\r\npandas-flavor\r\npandas-vet\r\npre-commit\r\npyspark\r\npytest-azurepipelines\r\npytest-cov\r\npytest>=3.4.2\r\nscikit-learn\r\nseaborn\r\nsetuptools>=38.5.2\r\nsphinxcontrib-fulltoc==1.2.0\r\nunyt\r\nxarray\r\n```\r\n\r\nThose are inject in the setup.py as mandatory dependencies but a lot in there looks like docs, dev, or optional. Is pyjanitor really require all that at run time?\n", "before_files": [{"content": "\"\"\"Setup script.\"\"\"\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\n\ndef requirements():\n \"\"\"Reader for requirements.txt\"\"\"\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README.\n :raises Exception: if odd number of ``.. pypi-doc`` comments\n in README.\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.20.10\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n license=\"MIT\",\n # packages=[\"janitor\", \"janitor.xarray\", \"janitor.spark\"],\n packages=find_packages(),\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}]}
| 1,426 | 992 |
gh_patches_debug_3040 | rasdani/github-patches | git_diff | mosaicml__composer-496 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move `ComposerTrainer` to top-level imports
Our most heavily used objects should be easily importable from `composer` via:
```
from composer import Trainer, ComposerModel
```
rather than remember their submodule:
```
from composer.models import ComposerModel
```
Especially the last one: it's tricky to remember whether it's `models` or `model`
</issue>
<code>
[start of composer/__init__.py]
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 from composer import algorithms as algorithms
4 from composer import callbacks as callbacks
5 from composer import datasets as datasets
6 from composer import loggers as loggers
7 from composer import models as models
8 from composer import optim as optim
9 from composer import profiler as profiler
10 from composer import trainer as trainer
11 from composer import utils as utils
12 from composer.core import Algorithm as Algorithm
13 from composer.core import Callback as Callback
14 from composer.core import DataSpec as DataSpec
15 from composer.core import Engine as Engine
16 from composer.core import Event as Event
17 from composer.core import Logger as Logger
18 from composer.core import State as State
19 from composer.core import Time as Time
20 from composer.core import Timer as Timer
21 from composer.core import TimeUnit as TimeUnit
22 from composer.core import types as types
23 from composer.trainer import Trainer as Trainer
24
25 __version__ = "0.3.1"
26
[end of composer/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/composer/__init__.py b/composer/__init__.py
--- a/composer/__init__.py
+++ b/composer/__init__.py
@@ -20,6 +20,7 @@
from composer.core import Timer as Timer
from composer.core import TimeUnit as TimeUnit
from composer.core import types as types
+from composer.models import ComposerModel as ComposerModel
from composer.trainer import Trainer as Trainer
__version__ = "0.3.1"
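For reference, a quick check of what this one-line re-export enables; this is an illustrative snippet assuming a composer checkout with the patch applied, not part of the upstream change:

```python
# Both heavily used objects are now importable from the top-level package,
# matching the style requested in the issue (ComposerModel is re-exported
# from composer.models by the patch above).
from composer import ComposerModel, Trainer

print(ComposerModel, Trainer)
```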
|
{"golden_diff": "diff --git a/composer/__init__.py b/composer/__init__.py\n--- a/composer/__init__.py\n+++ b/composer/__init__.py\n@@ -20,6 +20,7 @@\n from composer.core import Timer as Timer\n from composer.core import TimeUnit as TimeUnit\n from composer.core import types as types\n+from composer.models import ComposerModel as ComposerModel\n from composer.trainer import Trainer as Trainer\n \n __version__ = \"0.3.1\"\n", "issue": "Move `ComposerTrainer` to top-level imports\nOur most heavily used objects should be easily importable from `composer` via:\r\n```\r\nfrom composer import Trainer, ComposerModel\r\n```\r\nrather than remember their submodule:\r\n\r\n```\r\nfrom composer.models import ComposerModel\r\n```\r\n\r\nEspecially the last one, its tricky to remember whether its `models` or `model`\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\nfrom composer import algorithms as algorithms\nfrom composer import callbacks as callbacks\nfrom composer import datasets as datasets\nfrom composer import loggers as loggers\nfrom composer import models as models\nfrom composer import optim as optim\nfrom composer import profiler as profiler\nfrom composer import trainer as trainer\nfrom composer import utils as utils\nfrom composer.core import Algorithm as Algorithm\nfrom composer.core import Callback as Callback\nfrom composer.core import DataSpec as DataSpec\nfrom composer.core import Engine as Engine\nfrom composer.core import Event as Event\nfrom composer.core import Logger as Logger\nfrom composer.core import State as State\nfrom composer.core import Time as Time\nfrom composer.core import Timer as Timer\nfrom composer.core import TimeUnit as TimeUnit\nfrom composer.core import types as types\nfrom composer.trainer import Trainer as Trainer\n\n__version__ = \"0.3.1\"\n", "path": "composer/__init__.py"}]}
| 845 | 103 |
gh_patches_debug_7982
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-5253
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Special characters in st2admin account causing st2 key failure
Just upgraded to version 3.4 and my keyvault is having problems. I believe it's due to my st2admin password containing special characters.
```
[root@stackstorm workflows]# st2 key list --scope=all
Traceback (most recent call last):
File "/bin/st2", line 10, in <module>
sys.exit(main())
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py", line 470, in main
return Shell().run(argv)
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py", line 385, in run
config = self._parse_config_file(args=args, validate_config_permissions=False)
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/base.py", line 183, in _parse_config_file
result = parser.parse()
File "/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/config_parser.py", line 197, in parse
value = get_func(section, key)
File "/usr/lib64/python3.6/configparser.py", line 800, in get
d)
File "/usr/lib64/python3.6/configparser.py", line 394, in before_get
self._interpolate_some(parser, option, L, value, section, defaults, 1)
File "/usr/lib64/python3.6/configparser.py", line 444, in _interpolate_some
"found: %r" % (rest,))
configparser.InterpolationSyntaxError: '%' must be followed by '%' or '(', found: '%3C#V~Bvg%3E3t+'
```
The password above is what I used to install StackStorm, or at least part of it. I've since changed the password via the documented htpasswd method, but the issue persists. Any tips? I've left the password in for research purposes.
curl -sSL https://stackstorm.com/packages/install.sh | bash -s -- --user=st2admin --password='q7j/t%3C#V~Bvg%3E3t+'
</issue>
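The root cause is visible in the traceback: `ConfigParser` applies `%`-interpolation when reading values, so a stored password containing `%` blows up on `get()`. Below is a minimal, self-contained sketch of the failure and of the `raw=True` escape hatch that the accepted fix (shown further below) uses; the config contents are illustrative stand-ins for what the installer writes to `~/.st2/config`.

```python
# Minimal reproduction, independent of st2: ConfigParser's default
# interpolation treats '%' as special, so reading the stored password fails.
import configparser

ILLUSTRATIVE_CONFIG = """
[credentials]
username = st2admin
password = q7j/t%3C#V~Bvg%3E3t+
"""

parser = configparser.ConfigParser()
parser.read_string(ILLUSTRATIVE_CONFIG)

try:
    parser.get("credentials", "password")
except configparser.InterpolationSyntaxError as exc:
    print("fails as in the issue:", exc)

# Reading the value raw skips interpolation, so the '%' characters
# come back verbatim.
print(parser.get("credentials", "password", raw=True))
```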
<code>
[start of st2client/st2client/config_parser.py]
1 # Copyright 2020 The StackStorm Authors.
2 # Copyright 2019 Extreme Networks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """
17 Module for parsing CLI config file.
18 """
19
20 from __future__ import absolute_import
21
22 import logging
23 import os
24
25 from collections import defaultdict
26
27 import io
28
29 import six
30 from six.moves.configparser import ConfigParser
31
32
33 __all__ = [
34 "CLIConfigParser",
35 "ST2_CONFIG_DIRECTORY",
36 "ST2_CONFIG_PATH",
37 "CONFIG_DEFAULT_VALUES",
38 ]
39
40 ST2_CONFIG_DIRECTORY = "~/.st2"
41 ST2_CONFIG_DIRECTORY = os.path.abspath(os.path.expanduser(ST2_CONFIG_DIRECTORY))
42
43 ST2_CONFIG_PATH = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, "config"))
44
45 CONFIG_FILE_OPTIONS = {
46 "general": {
47 "base_url": {"type": "string", "default": None},
48 "api_version": {"type": "string", "default": None},
49 "cacert": {"type": "string", "default": None},
50 "silence_ssl_warnings": {"type": "bool", "default": False},
51 "silence_schema_output": {"type": "bool", "default": True},
52 },
53 "cli": {
54 "debug": {"type": "bool", "default": False},
55 "cache_token": {"type": "boolean", "default": True},
56 "timezone": {"type": "string", "default": "UTC"},
57 },
58 "credentials": {
59 "username": {"type": "string", "default": None},
60 "password": {"type": "string", "default": None},
61 "api_key": {"type": "string", "default": None},
62 },
63 "api": {"url": {"type": "string", "default": None}},
64 "auth": {"url": {"type": "string", "default": None}},
65 "stream": {"url": {"type": "string", "default": None}},
66 }
67
68 CONFIG_DEFAULT_VALUES = {}
69
70 for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):
71 CONFIG_DEFAULT_VALUES[section] = {}
72
73 for key, options in six.iteritems(keys):
74 default_value = options["default"]
75 CONFIG_DEFAULT_VALUES[section][key] = default_value
76
77
78 class CLIConfigParser(object):
79 def __init__(
80 self,
81 config_file_path,
82 validate_config_exists=True,
83 validate_config_permissions=True,
84 log=None,
85 ):
86 if validate_config_exists and not os.path.isfile(config_file_path):
87 raise ValueError('Config file "%s" doesn\'t exist')
88
89 if log is None:
90 log = logging.getLogger(__name__)
91 logging.basicConfig()
92
93 self.config_file_path = config_file_path
94 self.validate_config_permissions = validate_config_permissions
95 self.LOG = log
96
97 def parse(self):
98 """
99 Parse the config and return a dict with the parsed values.
100
101 :rtype: ``dict``
102 """
103 result = defaultdict(dict)
104
105 if not os.path.isfile(self.config_file_path):
106 # Config doesn't exist, return the default values
107 return CONFIG_DEFAULT_VALUES
108
109 config_dir_path = os.path.dirname(self.config_file_path)
110
111 if self.validate_config_permissions:
112 # Make sure the directory permissions == 0o770
113 if bool(os.stat(config_dir_path).st_mode & 0o7):
114 self.LOG.warn(
115 "The StackStorm configuration directory permissions are "
116 "insecure (too permissive): others have access."
117 )
118
119 # Make sure the setgid bit is set on the directory
120 if not bool(os.stat(config_dir_path).st_mode & 0o2000):
121 self.LOG.info(
122 "The SGID bit is not set on the StackStorm configuration "
123 "directory."
124 )
125
126 # Make sure the file permissions == 0o660
127 if bool(os.stat(self.config_file_path).st_mode & 0o7):
128 self.LOG.warn(
129 "The StackStorm configuration file permissions are "
130 "insecure: others have access."
131 )
132
133 config = ConfigParser()
134 with io.open(self.config_file_path, "r", encoding="utf8") as fp:
135 config.readfp(fp)
136
137 for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):
138 for key, options in six.iteritems(keys):
139 key_type = options["type"]
140 key_default_value = options["default"]
141
142 if config.has_option(section, key):
143 if key_type in ["str", "string"]:
144 get_func = config.get
145 elif key_type in ["int", "integer"]:
146 get_func = config.getint
147 elif key_type in ["float"]:
148 get_func = config.getfloat
149 elif key_type in ["bool", "boolean"]:
150 get_func = config.getboolean
151 else:
152 msg = 'Invalid type "%s" for option "%s"' % (key_type, key)
153 raise ValueError(msg)
154
155 value = get_func(section, key)
156 result[section][key] = value
157 else:
158 result[section][key] = key_default_value
159
160 return dict(result)
161
[end of st2client/st2client/config_parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2client/st2client/config_parser.py b/st2client/st2client/config_parser.py
--- a/st2client/st2client/config_parser.py
+++ b/st2client/st2client/config_parser.py
@@ -152,7 +152,7 @@
msg = 'Invalid type "%s" for option "%s"' % (key_type, key)
raise ValueError(msg)
- value = get_func(section, key)
+ value = get_func(section, key, raw=True)
result[section][key] = value
else:
result[section][key] = key_default_value
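A short note on this fix (commentary, not part of the upstream patch): passing `raw=True` disables interpolation only for these reads and leaves the rest of the parser's behaviour untouched. A broader alternative, had it been preferred, would be to build the parser without interpolation at all:

```python
import configparser

# Alternative approach (not what the patch does): disable interpolation
# for the whole parser, so every get() returns stored values verbatim.
config = configparser.ConfigParser(interpolation=None)
```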
|
{"golden_diff": "diff --git a/st2client/st2client/config_parser.py b/st2client/st2client/config_parser.py\n--- a/st2client/st2client/config_parser.py\n+++ b/st2client/st2client/config_parser.py\n@@ -152,7 +152,7 @@\n msg = 'Invalid type \"%s\" for option \"%s\"' % (key_type, key)\n raise ValueError(msg)\n \n- value = get_func(section, key)\n+ value = get_func(section, key, raw=True)\n result[section][key] = value\n else:\n result[section][key] = key_default_value\n", "issue": "Special characters in st2admin account causing st2 key failure\nJust upgraded to version 3.4 and my keyvault is having problems. I believe it's due to my st2admin password containing special characters.\r\n\r\n```\r\n[root@stackstorm workflows]# st2 key list --scope=all\r\nTraceback (most recent call last):\r\n File \"/bin/st2\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py\", line 470, in main\r\n return Shell().run(argv)\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/shell.py\", line 385, in run\r\n config = self._parse_config_file(args=args, validate_config_permissions=False)\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/base.py\", line 183, in _parse_config_file\r\n result = parser.parse()\r\n File \"/opt/stackstorm/st2/lib/python3.6/site-packages/st2client/config_parser.py\", line 197, in parse\r\n value = get_func(section, key)\r\n File \"/usr/lib64/python3.6/configparser.py\", line 800, in get\r\n d)\r\n File \"/usr/lib64/python3.6/configparser.py\", line 394, in before_get\r\n self._interpolate_some(parser, option, L, value, section, defaults, 1)\r\n File \"/usr/lib64/python3.6/configparser.py\", line 444, in _interpolate_some\r\n \"found: %r\" % (rest,))\r\nconfigparser.InterpolationSyntaxError: '%' must be followed by '%' or '(', found: '%3C#V~Bvg%3E3t+'\r\n```\r\n\r\nThis password above is what I used to install stackstorm. Or at least part of it. I've since changed the password via the documented htpasswd method, but the issue persists. Any tips? 
Left the password in for research purposes.\r\n\r\ncurl -sSL https://stackstorm.com/packages/install.sh | bash -s -- --user=st2admin --password='q7j/t%3C#V~Bvg%3E3t+'\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule for parsing CLI config file.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport os\n\nfrom collections import defaultdict\n\nimport io\n\nimport six\nfrom six.moves.configparser import ConfigParser\n\n\n__all__ = [\n \"CLIConfigParser\",\n \"ST2_CONFIG_DIRECTORY\",\n \"ST2_CONFIG_PATH\",\n \"CONFIG_DEFAULT_VALUES\",\n]\n\nST2_CONFIG_DIRECTORY = \"~/.st2\"\nST2_CONFIG_DIRECTORY = os.path.abspath(os.path.expanduser(ST2_CONFIG_DIRECTORY))\n\nST2_CONFIG_PATH = os.path.abspath(os.path.join(ST2_CONFIG_DIRECTORY, \"config\"))\n\nCONFIG_FILE_OPTIONS = {\n \"general\": {\n \"base_url\": {\"type\": \"string\", \"default\": None},\n \"api_version\": {\"type\": \"string\", \"default\": None},\n \"cacert\": {\"type\": \"string\", \"default\": None},\n \"silence_ssl_warnings\": {\"type\": \"bool\", \"default\": False},\n \"silence_schema_output\": {\"type\": \"bool\", \"default\": True},\n },\n \"cli\": {\n \"debug\": {\"type\": \"bool\", \"default\": False},\n \"cache_token\": {\"type\": \"boolean\", \"default\": True},\n \"timezone\": {\"type\": \"string\", \"default\": \"UTC\"},\n },\n \"credentials\": {\n \"username\": {\"type\": \"string\", \"default\": None},\n \"password\": {\"type\": \"string\", \"default\": None},\n \"api_key\": {\"type\": \"string\", \"default\": None},\n },\n \"api\": {\"url\": {\"type\": \"string\", \"default\": None}},\n \"auth\": {\"url\": {\"type\": \"string\", \"default\": None}},\n \"stream\": {\"url\": {\"type\": \"string\", \"default\": None}},\n}\n\nCONFIG_DEFAULT_VALUES = {}\n\nfor section, keys in six.iteritems(CONFIG_FILE_OPTIONS):\n CONFIG_DEFAULT_VALUES[section] = {}\n\n for key, options in six.iteritems(keys):\n default_value = options[\"default\"]\n CONFIG_DEFAULT_VALUES[section][key] = default_value\n\n\nclass CLIConfigParser(object):\n def __init__(\n self,\n config_file_path,\n validate_config_exists=True,\n validate_config_permissions=True,\n log=None,\n ):\n if validate_config_exists and not os.path.isfile(config_file_path):\n raise ValueError('Config file \"%s\" doesn\\'t exist')\n\n if log is None:\n log = logging.getLogger(__name__)\n logging.basicConfig()\n\n self.config_file_path = config_file_path\n self.validate_config_permissions = validate_config_permissions\n self.LOG = log\n\n def parse(self):\n \"\"\"\n Parse the config and return a dict with the parsed values.\n\n :rtype: ``dict``\n \"\"\"\n result = defaultdict(dict)\n\n if not os.path.isfile(self.config_file_path):\n # Config doesn't exist, return the default values\n return CONFIG_DEFAULT_VALUES\n\n config_dir_path = os.path.dirname(self.config_file_path)\n\n if self.validate_config_permissions:\n # Make sure the directory 
permissions == 0o770\n if bool(os.stat(config_dir_path).st_mode & 0o7):\n self.LOG.warn(\n \"The StackStorm configuration directory permissions are \"\n \"insecure (too permissive): others have access.\"\n )\n\n # Make sure the setgid bit is set on the directory\n if not bool(os.stat(config_dir_path).st_mode & 0o2000):\n self.LOG.info(\n \"The SGID bit is not set on the StackStorm configuration \"\n \"directory.\"\n )\n\n # Make sure the file permissions == 0o660\n if bool(os.stat(self.config_file_path).st_mode & 0o7):\n self.LOG.warn(\n \"The StackStorm configuration file permissions are \"\n \"insecure: others have access.\"\n )\n\n config = ConfigParser()\n with io.open(self.config_file_path, \"r\", encoding=\"utf8\") as fp:\n config.readfp(fp)\n\n for section, keys in six.iteritems(CONFIG_FILE_OPTIONS):\n for key, options in six.iteritems(keys):\n key_type = options[\"type\"]\n key_default_value = options[\"default\"]\n\n if config.has_option(section, key):\n if key_type in [\"str\", \"string\"]:\n get_func = config.get\n elif key_type in [\"int\", \"integer\"]:\n get_func = config.getint\n elif key_type in [\"float\"]:\n get_func = config.getfloat\n elif key_type in [\"bool\", \"boolean\"]:\n get_func = config.getboolean\n else:\n msg = 'Invalid type \"%s\" for option \"%s\"' % (key_type, key)\n raise ValueError(msg)\n\n value = get_func(section, key)\n result[section][key] = value\n else:\n result[section][key] = key_default_value\n\n return dict(result)\n", "path": "st2client/st2client/config_parser.py"}]}
| 2,618 | 135 |
gh_patches_debug_19660
|
rasdani/github-patches
|
git_diff
|
pypa__pip-1725
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Traceback if any uppercase command entered.
c:\Python34\Scripts>pip3.exe -V
pip 1.5.2 from C:\Python34\lib\site-packages (python 3.4)
Windows 7.1
c:\Python34\Scripts>pip3.exe LIST
Traceback (most recent call last):
File "C:\Python34\lib\runpy.py", line 171, in _run_module_as_main
"__main__", mod_spec)
File "C:\Python34\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "c:\Python34\Scripts\pip3.exe__main__.py", line 9, in <module>
File "C:\Python34\lib\site-packages\pip__init__.py", line 177, in main
cmd_name, cmd_args = parseopts(initial_args)
File "C:\Python34\lib\site-packages\pip__init__.py", line 156, in parseopts
cmd_args.remove(args_else[0].lower())
ValueError: list.remove(x): x not in list
This was confirmed in response to my query on the main Python mailing list.
</issue>
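The failure reduces to one line in `parseopts()`: the command name is lower-cased before `list.remove()`, but the args list still holds the original `LIST` token. Below is a standalone sketch of the bug and of the direction the accepted patch (shown further below) takes; the variable names mirror the pip source, but the snippet itself is illustrative:

```python
# Standalone sketch of the crash in parseopts(), outside of pip itself.
args = ["LIST"]          # what the user typed
args_else = ["LIST"]     # what parser.parse_args() hands back

cmd_args = args[:]
try:
    # pip 1.5.x / 1.6.dev looked for the lower-cased name in a list that
    # still contains the original casing:
    cmd_args.remove(args_else[0].lower())
except ValueError as exc:
    print("reproduces the crash:", exc)   # list.remove(x): x not in list

# The fix removes the token exactly as typed and lets the unknown-command
# branch suggest the lower-cased name instead.
cmd_args = args[:]
cmd_args.remove(args_else[0])
print(cmd_args)   # []
```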
<code>
[start of pip/commands/__init__.py]
1 """
2 Package containing all pip commands
3 """
4
5
6 from pip.commands.bundle import BundleCommand
7 from pip.commands.completion import CompletionCommand
8 from pip.commands.freeze import FreezeCommand
9 from pip.commands.help import HelpCommand
10 from pip.commands.list import ListCommand
11 from pip.commands.search import SearchCommand
12 from pip.commands.show import ShowCommand
13 from pip.commands.install import InstallCommand
14 from pip.commands.uninstall import UninstallCommand
15 from pip.commands.unzip import UnzipCommand
16 from pip.commands.zip import ZipCommand
17 from pip.commands.wheel import WheelCommand
18
19
20 commands = {
21 BundleCommand.name: BundleCommand,
22 CompletionCommand.name: CompletionCommand,
23 FreezeCommand.name: FreezeCommand,
24 HelpCommand.name: HelpCommand,
25 SearchCommand.name: SearchCommand,
26 ShowCommand.name: ShowCommand,
27 InstallCommand.name: InstallCommand,
28 UninstallCommand.name: UninstallCommand,
29 UnzipCommand.name: UnzipCommand,
30 ZipCommand.name: ZipCommand,
31 ListCommand.name: ListCommand,
32 WheelCommand.name: WheelCommand,
33 }
34
35
36 commands_order = [
37 InstallCommand,
38 UninstallCommand,
39 FreezeCommand,
40 ListCommand,
41 ShowCommand,
42 SearchCommand,
43 WheelCommand,
44 ZipCommand,
45 UnzipCommand,
46 BundleCommand,
47 HelpCommand,
48 ]
49
50
51 def get_summaries(ignore_hidden=True, ordered=True):
52 """Yields sorted (command name, command summary) tuples."""
53
54 if ordered:
55 cmditems = _sort_commands(commands, commands_order)
56 else:
57 cmditems = commands.items()
58
59 for name, command_class in cmditems:
60 if ignore_hidden and command_class.hidden:
61 continue
62
63 yield (name, command_class.summary)
64
65
66 def get_similar_commands(name):
67 """Command name auto-correct."""
68 from difflib import get_close_matches
69
70 close_commands = get_close_matches(name, commands.keys())
71
72 if close_commands:
73 guess = close_commands[0]
74 else:
75 guess = False
76
77 return guess
78
79
80 def _sort_commands(cmddict, order):
81 def keyfn(key):
82 try:
83 return order.index(key[1])
84 except ValueError:
85 # unordered items should come last
86 return 0xff
87
88 return sorted(cmddict.items(), key=keyfn)
89
[end of pip/commands/__init__.py]
[start of pip/__init__.py]
1 #!/usr/bin/env python
2 import os
3 import optparse
4
5 import sys
6 import re
7
8 from pip.exceptions import InstallationError, CommandError, PipError
9 from pip.log import logger
10 from pip.util import get_installed_distributions, get_prog
11 from pip.vcs import git, mercurial, subversion, bazaar # noqa
12 from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
13 from pip.commands import commands, get_summaries, get_similar_commands
14
15 # This fixes a peculiarity when importing via __import__ - as we are
16 # initialising the pip module, "from pip import cmdoptions" is recursive
17 # and appears not to work properly in that situation.
18 import pip.cmdoptions
19 cmdoptions = pip.cmdoptions
20
21 # The version as used in the setup.py and the docs conf.py
22 __version__ = "1.6.dev1"
23
24
25 def autocomplete():
26 """Command and option completion for the main option parser (and options)
27 and its subcommands (and options).
28
29 Enable by sourcing one of the completion shell scripts (bash or zsh).
30 """
31 # Don't complete if user hasn't sourced bash_completion file.
32 if 'PIP_AUTO_COMPLETE' not in os.environ:
33 return
34 cwords = os.environ['COMP_WORDS'].split()[1:]
35 cword = int(os.environ['COMP_CWORD'])
36 try:
37 current = cwords[cword - 1]
38 except IndexError:
39 current = ''
40
41 subcommands = [cmd for cmd, summary in get_summaries()]
42 options = []
43 # subcommand
44 try:
45 subcommand_name = [w for w in cwords if w in subcommands][0]
46 except IndexError:
47 subcommand_name = None
48
49 parser = create_main_parser()
50 # subcommand options
51 if subcommand_name:
52 # special case: 'help' subcommand has no options
53 if subcommand_name == 'help':
54 sys.exit(1)
55 # special case: list locally installed dists for uninstall command
56 if subcommand_name == 'uninstall' and not current.startswith('-'):
57 installed = []
58 lc = current.lower()
59 for dist in get_installed_distributions(local_only=True):
60 if dist.key.startswith(lc) and dist.key not in cwords[1:]:
61 installed.append(dist.key)
62 # if there are no dists installed, fall back to option completion
63 if installed:
64 for dist in installed:
65 print(dist)
66 sys.exit(1)
67
68 subcommand = commands[subcommand_name]()
69 options += [(opt.get_opt_string(), opt.nargs)
70 for opt in subcommand.parser.option_list_all
71 if opt.help != optparse.SUPPRESS_HELP]
72
73 # filter out previously specified options from available options
74 prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
75 options = [(x, v) for (x, v) in options if x not in prev_opts]
76 # filter options by current input
77 options = [(k, v) for k, v in options if k.startswith(current)]
78 for option in options:
79 opt_label = option[0]
80 # append '=' to options which require args
81 if option[1]:
82 opt_label += '='
83 print(opt_label)
84 else:
85 # show main parser options only when necessary
86 if current.startswith('-') or current.startswith('--'):
87 opts = [i.option_list for i in parser.option_groups]
88 opts.append(parser.option_list)
89 opts = (o for it in opts for o in it)
90
91 subcommands += [i.get_opt_string() for i in opts
92 if i.help != optparse.SUPPRESS_HELP]
93
94 print(' '.join([x for x in subcommands if x.startswith(current)]))
95 sys.exit(1)
96
97
98 def create_main_parser():
99 parser_kw = {
100 'usage': '\n%prog <command> [options]',
101 'add_help_option': False,
102 'formatter': UpdatingDefaultsHelpFormatter(),
103 'name': 'global',
104 'prog': get_prog(),
105 }
106
107 parser = ConfigOptionParser(**parser_kw)
108 parser.disable_interspersed_args()
109
110 pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
111 parser.version = 'pip %s from %s (python %s)' % (
112 __version__, pip_pkg_dir, sys.version[:3])
113
114 # add the general options
115 gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
116 parser.add_option_group(gen_opts)
117
118 parser.main = True # so the help formatter knows
119
120 # create command listing for description
121 command_summaries = get_summaries()
122 description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
123 parser.description = '\n'.join(description)
124
125 return parser
126
127
128 def parseopts(args):
129 parser = create_main_parser()
130
131 # Note: parser calls disable_interspersed_args(), so the result of this
132 # call is to split the initial args into the general options before the
133 # subcommand and everything else.
134 # For example:
135 # args: ['--timeout=5', 'install', '--user', 'INITools']
136 # general_options: ['--timeout==5']
137 # args_else: ['install', '--user', 'INITools']
138 general_options, args_else = parser.parse_args(args)
139
140 # --version
141 if general_options.version:
142 sys.stdout.write(parser.version)
143 sys.stdout.write(os.linesep)
144 sys.exit()
145
146 # pip || pip help -> print_help()
147 if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
148 parser.print_help()
149 sys.exit()
150
151 # the subcommand name
152 cmd_name = args_else[0].lower()
153
154 # all the args without the subcommand
155 cmd_args = args[:]
156 cmd_args.remove(args_else[0].lower())
157
158 if cmd_name not in commands:
159 guess = get_similar_commands(cmd_name)
160
161 msg = ['unknown command "%s"' % cmd_name]
162 if guess:
163 msg.append('maybe you meant "%s"' % guess)
164
165 raise CommandError(' - '.join(msg))
166
167 return cmd_name, cmd_args
168
169
170 def main(args=None):
171 if args is None:
172 args = sys.argv[1:]
173
174 autocomplete()
175
176 try:
177 cmd_name, cmd_args = parseopts(args)
178 except PipError as exc:
179 sys.stderr.write("ERROR: %s" % exc)
180 sys.stderr.write(os.linesep)
181 sys.exit(1)
182
183 command = commands[cmd_name]()
184 return command.main(cmd_args)
185
186
187 # ###########################################################
188 # # Writing freeze files
189
190 class FrozenRequirement(object):
191
192 def __init__(self, name, req, editable, comments=()):
193 self.name = name
194 self.req = req
195 self.editable = editable
196 self.comments = comments
197
198 _rev_re = re.compile(r'-r(\d+)$')
199 _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
200
201 @classmethod
202 def from_dist(cls, dist, find_tags=False):
203 location = os.path.normcase(os.path.abspath(dist.location))
204 comments = []
205 from pip.vcs import vcs, get_src_requirement
206 if vcs.get_backend_name(location):
207 editable = True
208 try:
209 req = get_src_requirement(dist, location, find_tags)
210 except InstallationError as exc:
211 logger.warn(
212 "Error when trying to get requirement for VCS system %s, "
213 "falling back to uneditable format" % exc
214 )
215 req = None
216 if req is None:
217 logger.warn(
218 'Could not determine repository location of %s' % location
219 )
220 comments.append(
221 '## !! Could not determine repository location'
222 )
223 req = dist.as_requirement()
224 editable = False
225 else:
226 editable = False
227 req = dist.as_requirement()
228 specs = req.specs
229 assert len(specs) == 1 and specs[0][0] == '=='
230
231 return cls(dist.project_name, req, editable, comments)
232
233 @staticmethod
234 def egg_name(dist):
235 name = dist.egg_name()
236 match = re.search(r'-py\d\.\d$', name)
237 if match:
238 name = name[:match.start()]
239 return name
240
241 def __str__(self):
242 req = self.req
243 if self.editable:
244 req = '-e %s' % req
245 return '\n'.join(list(self.comments) + [str(req)]) + '\n'
246
247
248 if __name__ == '__main__':
249 sys.exit(main())
250
[end of pip/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pip/__init__.py b/pip/__init__.py
--- a/pip/__init__.py
+++ b/pip/__init__.py
@@ -149,11 +149,11 @@
sys.exit()
# the subcommand name
- cmd_name = args_else[0].lower()
+ cmd_name = args_else[0]
# all the args without the subcommand
cmd_args = args[:]
- cmd_args.remove(args_else[0].lower())
+ cmd_args.remove(args_else[0])
if cmd_name not in commands:
guess = get_similar_commands(cmd_name)
diff --git a/pip/commands/__init__.py b/pip/commands/__init__.py
--- a/pip/commands/__init__.py
+++ b/pip/commands/__init__.py
@@ -67,14 +67,14 @@
"""Command name auto-correct."""
from difflib import get_close_matches
+ name = name.lower()
+
close_commands = get_close_matches(name, commands.keys())
if close_commands:
- guess = close_commands[0]
+ return close_commands[0]
else:
- guess = False
-
- return guess
+ return False
def _sort_commands(cmddict, order):
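With this change, `pip LIST` no longer raises a traceback; it falls through to the unknown-command branch, and because `get_similar_commands` now lower-cases its input, the fuzzy match can still suggest the intended command. A tiny illustration using only the stdlib helper the patch relies on:

```python
from difflib import get_close_matches

# Roughly what get_similar_commands("LIST") does after the patch:
name = "LIST".lower()
print(get_close_matches(name, ["install", "uninstall", "freeze", "list"]))  # ['list']
```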
|
{"golden_diff": "diff --git a/pip/__init__.py b/pip/__init__.py\n--- a/pip/__init__.py\n+++ b/pip/__init__.py\n@@ -149,11 +149,11 @@\n sys.exit()\n \n # the subcommand name\n- cmd_name = args_else[0].lower()\n+ cmd_name = args_else[0]\n \n # all the args without the subcommand\n cmd_args = args[:]\n- cmd_args.remove(args_else[0].lower())\n+ cmd_args.remove(args_else[0])\n \n if cmd_name not in commands:\n guess = get_similar_commands(cmd_name)\ndiff --git a/pip/commands/__init__.py b/pip/commands/__init__.py\n--- a/pip/commands/__init__.py\n+++ b/pip/commands/__init__.py\n@@ -67,14 +67,14 @@\n \"\"\"Command name auto-correct.\"\"\"\n from difflib import get_close_matches\n \n+ name = name.lower()\n+\n close_commands = get_close_matches(name, commands.keys())\n \n if close_commands:\n- guess = close_commands[0]\n+ return close_commands[0]\n else:\n- guess = False\n-\n- return guess\n+ return False\n \n \n def _sort_commands(cmddict, order):\n", "issue": "Traceback if any uppercase command entered.\nc:\\Python34\\Scripts>pip3.exe -V\npip 1.5.2 from C:\\Python34\\lib\\site-packages (python 3.4)\nWindows 7.1\n\nc:\\Python34\\Scripts>pip3.exe LIST\nTraceback (most recent call last):\n File \"C:\\Python34\\lib\\runpy.py\", line 171, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"C:\\Python34\\lib\\runpy.py\", line 86, in _run_code\n exec(code, run_globals)\n File \"c:\\Python34\\Scripts\\pip3.exe__main__.py\", line 9, in <module>\n File \"C:\\Python34\\lib\\site-packages\\pip__init__.py\", line 177, in main\n cmd_name, cmd_args = parseopts(initial_args)\n File \"C:\\Python34\\lib\\site-packages\\pip__init__.py\", line 156, in parseopts\n cmd_args.remove(args_else[0].lower())\nValueError: list.remove(x): x not in list\n\nThis was confirmed in response to my query on the main python mailing list.\n\n", "before_files": [{"content": "\"\"\"\nPackage containing all pip commands\n\"\"\"\n\n\nfrom pip.commands.bundle import BundleCommand\nfrom pip.commands.completion import CompletionCommand\nfrom pip.commands.freeze import FreezeCommand\nfrom pip.commands.help import HelpCommand\nfrom pip.commands.list import ListCommand\nfrom pip.commands.search import SearchCommand\nfrom pip.commands.show import ShowCommand\nfrom pip.commands.install import InstallCommand\nfrom pip.commands.uninstall import UninstallCommand\nfrom pip.commands.unzip import UnzipCommand\nfrom pip.commands.zip import ZipCommand\nfrom pip.commands.wheel import WheelCommand\n\n\ncommands = {\n BundleCommand.name: BundleCommand,\n CompletionCommand.name: CompletionCommand,\n FreezeCommand.name: FreezeCommand,\n HelpCommand.name: HelpCommand,\n SearchCommand.name: SearchCommand,\n ShowCommand.name: ShowCommand,\n InstallCommand.name: InstallCommand,\n UninstallCommand.name: UninstallCommand,\n UnzipCommand.name: UnzipCommand,\n ZipCommand.name: ZipCommand,\n ListCommand.name: ListCommand,\n WheelCommand.name: WheelCommand,\n}\n\n\ncommands_order = [\n InstallCommand,\n UninstallCommand,\n FreezeCommand,\n ListCommand,\n ShowCommand,\n SearchCommand,\n WheelCommand,\n ZipCommand,\n UnzipCommand,\n BundleCommand,\n HelpCommand,\n]\n\n\ndef get_summaries(ignore_hidden=True, ordered=True):\n \"\"\"Yields sorted (command name, command summary) tuples.\"\"\"\n\n if ordered:\n cmditems = _sort_commands(commands, commands_order)\n else:\n cmditems = commands.items()\n\n for name, command_class in cmditems:\n if ignore_hidden and command_class.hidden:\n continue\n\n yield (name, command_class.summary)\n\n\ndef 
get_similar_commands(name):\n \"\"\"Command name auto-correct.\"\"\"\n from difflib import get_close_matches\n\n close_commands = get_close_matches(name, commands.keys())\n\n if close_commands:\n guess = close_commands[0]\n else:\n guess = False\n\n return guess\n\n\ndef _sort_commands(cmddict, order):\n def keyfn(key):\n try:\n return order.index(key[1])\n except ValueError:\n # unordered items should come last\n return 0xff\n\n return sorted(cmddict.items(), key=keyfn)\n", "path": "pip/commands/__init__.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport optparse\n\nimport sys\nimport re\n\nfrom pip.exceptions import InstallationError, CommandError, PipError\nfrom pip.log import logger\nfrom pip.util import get_installed_distributions, get_prog\nfrom pip.vcs import git, mercurial, subversion, bazaar # noqa\nfrom pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter\nfrom pip.commands import commands, get_summaries, get_similar_commands\n\n# This fixes a peculiarity when importing via __import__ - as we are\n# initialising the pip module, \"from pip import cmdoptions\" is recursive\n# and appears not to work properly in that situation.\nimport pip.cmdoptions\ncmdoptions = pip.cmdoptions\n\n# The version as used in the setup.py and the docs conf.py\n__version__ = \"1.6.dev1\"\n\n\ndef autocomplete():\n \"\"\"Command and option completion for the main option parser (and options)\n and its subcommands (and options).\n\n Enable by sourcing one of the completion shell scripts (bash or zsh).\n \"\"\"\n # Don't complete if user hasn't sourced bash_completion file.\n if 'PIP_AUTO_COMPLETE' not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword - 1]\n except IndexError:\n current = ''\n\n subcommands = [cmd for cmd, summary in get_summaries()]\n options = []\n # subcommand\n try:\n subcommand_name = [w for w in cwords if w in subcommands][0]\n except IndexError:\n subcommand_name = None\n\n parser = create_main_parser()\n # subcommand options\n if subcommand_name:\n # special case: 'help' subcommand has no options\n if subcommand_name == 'help':\n sys.exit(1)\n # special case: list locally installed dists for uninstall command\n if subcommand_name == 'uninstall' and not current.startswith('-'):\n installed = []\n lc = current.lower()\n for dist in get_installed_distributions(local_only=True):\n if dist.key.startswith(lc) and dist.key not in cwords[1:]:\n installed.append(dist.key)\n # if there are no dists installed, fall back to option completion\n if installed:\n for dist in installed:\n print(dist)\n sys.exit(1)\n\n subcommand = commands[subcommand_name]()\n options += [(opt.get_opt_string(), opt.nargs)\n for opt in subcommand.parser.option_list_all\n if opt.help != optparse.SUPPRESS_HELP]\n\n # filter out previously specified options from available options\n prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]\n options = [(x, v) for (x, v) in options if x not in prev_opts]\n # filter options by current input\n options = [(k, v) for k, v in options if k.startswith(current)]\n for option in options:\n opt_label = option[0]\n # append '=' to options which require args\n if option[1]:\n opt_label += '='\n print(opt_label)\n else:\n # show main parser options only when necessary\n if current.startswith('-') or current.startswith('--'):\n opts = [i.option_list for i in parser.option_groups]\n opts.append(parser.option_list)\n opts = (o for it in opts for o in it)\n\n 
subcommands += [i.get_opt_string() for i in opts\n if i.help != optparse.SUPPRESS_HELP]\n\n print(' '.join([x for x in subcommands if x.startswith(current)]))\n sys.exit(1)\n\n\ndef create_main_parser():\n parser_kw = {\n 'usage': '\\n%prog <command> [options]',\n 'add_help_option': False,\n 'formatter': UpdatingDefaultsHelpFormatter(),\n 'name': 'global',\n 'prog': get_prog(),\n }\n\n parser = ConfigOptionParser(**parser_kw)\n parser.disable_interspersed_args()\n\n pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.version = 'pip %s from %s (python %s)' % (\n __version__, pip_pkg_dir, sys.version[:3])\n\n # add the general options\n gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)\n parser.add_option_group(gen_opts)\n\n parser.main = True # so the help formatter knows\n\n # create command listing for description\n command_summaries = get_summaries()\n description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]\n parser.description = '\\n'.join(description)\n\n return parser\n\n\ndef parseopts(args):\n parser = create_main_parser()\n\n # Note: parser calls disable_interspersed_args(), so the result of this\n # call is to split the initial args into the general options before the\n # subcommand and everything else.\n # For example:\n # args: ['--timeout=5', 'install', '--user', 'INITools']\n # general_options: ['--timeout==5']\n # args_else: ['install', '--user', 'INITools']\n general_options, args_else = parser.parse_args(args)\n\n # --version\n if general_options.version:\n sys.stdout.write(parser.version)\n sys.stdout.write(os.linesep)\n sys.exit()\n\n # pip || pip help -> print_help()\n if not args_else or (args_else[0] == 'help' and len(args_else) == 1):\n parser.print_help()\n sys.exit()\n\n # the subcommand name\n cmd_name = args_else[0].lower()\n\n # all the args without the subcommand\n cmd_args = args[:]\n cmd_args.remove(args_else[0].lower())\n\n if cmd_name not in commands:\n guess = get_similar_commands(cmd_name)\n\n msg = ['unknown command \"%s\"' % cmd_name]\n if guess:\n msg.append('maybe you meant \"%s\"' % guess)\n\n raise CommandError(' - '.join(msg))\n\n return cmd_name, cmd_args\n\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n\n autocomplete()\n\n try:\n cmd_name, cmd_args = parseopts(args)\n except PipError as exc:\n sys.stderr.write(\"ERROR: %s\" % exc)\n sys.stderr.write(os.linesep)\n sys.exit(1)\n\n command = commands[cmd_name]()\n return command.main(cmd_args)\n\n\n# ###########################################################\n# # Writing freeze files\n\nclass FrozenRequirement(object):\n\n def __init__(self, name, req, editable, comments=()):\n self.name = name\n self.req = req\n self.editable = editable\n self.comments = comments\n\n _rev_re = re.compile(r'-r(\\d+)$')\n _date_re = re.compile(r'-(20\\d\\d\\d\\d\\d\\d)$')\n\n @classmethod\n def from_dist(cls, dist, find_tags=False):\n location = os.path.normcase(os.path.abspath(dist.location))\n comments = []\n from pip.vcs import vcs, get_src_requirement\n if vcs.get_backend_name(location):\n editable = True\n try:\n req = get_src_requirement(dist, location, find_tags)\n except InstallationError as exc:\n logger.warn(\n \"Error when trying to get requirement for VCS system %s, \"\n \"falling back to uneditable format\" % exc\n )\n req = None\n if req is None:\n logger.warn(\n 'Could not determine repository location of %s' % location\n )\n comments.append(\n '## !! 
Could not determine repository location'\n )\n req = dist.as_requirement()\n editable = False\n else:\n editable = False\n req = dist.as_requirement()\n specs = req.specs\n assert len(specs) == 1 and specs[0][0] == '=='\n\n return cls(dist.project_name, req, editable, comments)\n\n @staticmethod\n def egg_name(dist):\n name = dist.egg_name()\n match = re.search(r'-py\\d\\.\\d$', name)\n if match:\n name = name[:match.start()]\n return name\n\n def __str__(self):\n req = self.req\n if self.editable:\n req = '-e %s' % req\n return '\\n'.join(list(self.comments) + [str(req)]) + '\\n'\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "pip/__init__.py"}]}
| 4,041 | 294 |
gh_patches_debug_31811
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__numpyro-342
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document behavior of primitive statements in README
Hello! I'll preface this issue by stating that I'm new to numpyro, so there's a significant chance that I'm not using `numpyro.sample` correctly.
When building models, I want to test draws from my random variables to make sure I'm passing parameters correctly, getting the kwargs correct, etc.; I'll lift a line out of a model function, [like this one](https://github.com/pyro-ppl/numpyro/blob/master/examples/baseball.py#L111), and run it in an IPython window to see if I get an error. It seems like I'm unable to do this by itself in an IPython console.
## Minimal example
```
In [1]: import numpyro
In [2]: import numpyro.distributions as d
In [3]: numpyro.__version__
Out[3]: '0.2.0'
In [4]: numpyro.sample("x", d.Normal(0, 1))
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-4-21ce96e72ad0> in <module>
----> 1 numpyro.sample("x", d.Normal(0, 1))
~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/primitives.py in sample(name, fn, obs, sample_shape)
45 # if there are no active Messengers, we just draw a sample and return it as expected:
46 if not _PYRO_STACK:
---> 47 return fn(sample_shape=sample_shape)
48
49 # Otherwise, we initialize a message...
~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/distributions/distribution.py in __call__(self, *args, **kwargs)
161
162 def __call__(self, *args, **kwargs):
--> 163 key = kwargs.pop('random_state')
164 sample_intermediates = kwargs.pop('sample_intermediates', False)
165 if sample_intermediates:
KeyError: 'random_state'
```
## Thoughts
I can see that in [numpyro.sample](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/primitives.py#L47), we return `fn(sample_shape=sample_shape)`, which in this case attempts to call `dist.Normal(0, 1)(sample_shape=sample_shape)`. However, looking at [distributions.py](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/distributions/distribution.py#L163), it seems that `Distribution.sample` expects a `random_state` kwarg that isn't getting passed.
When I do the following, everything is fine:
```
In [5]: import jax.random as random
In [6]: d.Normal(0, 1).sample(random.PRNGKey(12))
/Users/chtu8001/miniconda3/envs/fusion/lib/python3.7/site-packages/jax/lib/xla_bridge.py:114: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Out[6]: DeviceArray(-0.5135873, dtype=float32)
In [7]: d.Normal(0, 1).sample(random.PRNGKey(12), sample_shape=(4, ))
Out[7]:
DeviceArray([-1.31179953, -0.70821768, 0.18907626, -1.09385514],
dtype=float32)
```
so I expect that we can't use `numpyro.sample` outside a model (although it's not totally clear to me how that is defined, something to do with Messengers and PYRO_STACK). I'm wondering if this is by design and I should just use the second, working example, or if I'm misunderstanding how `numpyro.sample` should be used.
Thanks!
</issue>
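Two points may help here. First, as the working examples above show, a bare `Distribution.sample(key, ...)` call is always available for quick interactive checks. Second, `numpyro.sample` statements can be exercised outside a full inference run by wrapping them in the `seed` effect handler, which pushes a PRNG key onto the handler stack. The sketch below illustrates that pattern; note that the handler's argument name has varied across numpyro releases (`rng` in older versions, `rng_seed` later), so the exact keyword is an assumption to check against the installed version.

```python
# Sketch: run a bare numpyro.sample statement interactively by supplying
# a PRNG key through the seed handler instead of a full model/inference run.
# NOTE: the handler keyword below (rng_seed) is version-dependent; older
# releases such as 0.2.x used a different argument name.
import numpyro
import numpyro.distributions as dist
from jax import random
from numpyro.handlers import seed

with seed(rng_seed=random.PRNGKey(0)):
    x = numpyro.sample("x", dist.Normal(0.0, 1.0))

print(x)
```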
<code>
[start of numpyro/primitives.py]
1 from collections import namedtuple
2 import functools
3
4 import jax
5 from jax import lax
6
7 import numpyro
8 from numpyro.distributions.discrete import PRNGIdentity
9
10 _PYRO_STACK = []
11
12
13 CondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])
14
15
16 def apply_stack(msg):
17 pointer = 0
18 for pointer, handler in enumerate(reversed(_PYRO_STACK)):
19 handler.process_message(msg)
20 # When a Messenger sets the "stop" field of a message,
21 # it prevents any Messengers above it on the stack from being applied.
22 if msg.get("stop"):
23 break
24 if msg['value'] is None:
25 if msg['type'] == 'sample':
26 msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],
27 sample_intermediates=True,
28 **msg['kwargs'])
29 else:
30 msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])
31
32 # A Messenger that sets msg["stop"] == True also prevents application
33 # of postprocess_message by Messengers above it on the stack
34 # via the pointer variable from the process_message loop
35 for handler in _PYRO_STACK[-pointer-1:]:
36 handler.postprocess_message(msg)
37 return msg
38
39
40 class Messenger(object):
41 def __init__(self, fn=None):
42 self.fn = fn
43 functools.update_wrapper(self, fn, updated=[])
44
45 def __enter__(self):
46 _PYRO_STACK.append(self)
47
48 def __exit__(self, *args, **kwargs):
49 assert _PYRO_STACK[-1] is self
50 _PYRO_STACK.pop()
51
52 def process_message(self, msg):
53 pass
54
55 def postprocess_message(self, msg):
56 pass
57
58 def __call__(self, *args, **kwargs):
59 with self:
60 return self.fn(*args, **kwargs)
61
62
63 def sample(name, fn, obs=None, sample_shape=()):
64 """
65 Returns a random sample from the stochastic function `fn`. This can have
66 additional side effects when wrapped inside effect handlers like
67 :class:`~numpyro.handlers.substitute`.
68
69 :param str name: name of the sample site
70 :param fn: Python callable
71 :param numpy.ndarray obs: observed value
72 :param sample_shape: Shape of samples to be drawn.
73 :return: sample from the stochastic `fn`.
74 """
75 # if there are no active Messengers, we just draw a sample and return it as expected:
76 if not _PYRO_STACK:
77 return fn(sample_shape=sample_shape)
78
79 # Otherwise, we initialize a message...
80 initial_msg = {
81 'type': 'sample',
82 'name': name,
83 'fn': fn,
84 'args': (),
85 'kwargs': {'sample_shape': sample_shape},
86 'value': obs,
87 'scale': 1.0,
88 'is_observed': obs is not None,
89 'intermediates': [],
90 'cond_indep_stack': [],
91 }
92
93 # ...and use apply_stack to send it to the Messengers
94 msg = apply_stack(initial_msg)
95 return msg['value']
96
97
98 def identity(x, *args, **kwargs):
99 return x
100
101
102 def param(name, init_value=None, **kwargs):
103 """
104 Annotate the given site as an optimizable parameter for use with
105 :mod:`jax.experimental.optimizers`. For an example of how `param` statements
106 can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.
107
108 :param str name: name of site.
109 :param numpy.ndarray init_value: initial value specified by the user. Note that
110 the onus of using this to initialize the optimizer is on the user /
111 inference algorithm, since there is no global parameter store in
112 NumPyro.
113 :return: value for the parameter. Unless wrapped inside a
114 handler like :class:`~numpyro.handlers.substitute`, this will simply
115 return the initial value.
116 """
117 # if there are no active Messengers, we just draw a sample and return it as expected:
118 if not _PYRO_STACK:
119 return init_value
120
121 # Otherwise, we initialize a message...
122 initial_msg = {
123 'type': 'param',
124 'name': name,
125 'fn': identity,
126 'args': (init_value,),
127 'kwargs': kwargs,
128 'value': None,
129 'scale': 1.0,
130 'cond_indep_stack': [],
131 }
132
133 # ...and use apply_stack to send it to the Messengers
134 msg = apply_stack(initial_msg)
135 return msg['value']
136
137
138 def module(name, nn, input_shape=None):
139 """
140 Declare a :mod:`~jax.experimental.stax` style neural network inside a
141 model so that its parameters are registered for optimization via
142 :func:`~numpyro.primitives.param` statements.
143
144 :param str name: name of the module to be registered.
145 :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`
146 constructor function.
147 :param tuple input_shape: shape of the input taken by the
148 neural network.
149 :return: a `apply_fn` with bound parameters that takes an array
150 as an input and returns the neural network transformed output
151 array.
152 """
153 module_key = name + '$params'
154 nn_init, nn_apply = nn
155 nn_params = param(module_key)
156 if nn_params is None:
157 if input_shape is None:
158 raise ValueError('Valid value for `input_size` needed to initialize.')
159 rng = numpyro.sample(name + '$rng', PRNGIdentity())
160 _, nn_params = nn_init(rng, input_shape)
161 param(module_key, nn_params)
162 return jax.partial(nn_apply, nn_params)
163
164
165 class plate(Messenger):
166 """
167 Construct for annotating conditionally independent variables. Within a
168 `plate` context manager, `sample` sites will be automatically broadcasted to
169 the size of the plate. Additionally, a scale factor might be applied by
170 certain inference algorithms if `subsample_size` is specified.
171
172 :param str name: Name of the plate.
173 :param int size: Size of the plate.
174 :param int subsample_size: Optional argument denoting the size of the mini-batch.
175 This can be used to apply a scaling factor by inference algorithms. e.g.
176 when computing ELBO using a mini-batch.
177 :param int dim: Optional argument to specify which dimension in the tensor
178 is used as the plate dim. If `None` (default), the leftmost available dim
179 is allocated.
180 """
181 def __init__(self, name, size, subsample_size=None, dim=None):
182 self.name = name
183 self.size = size
184 self.subsample_size = size if subsample_size is None else subsample_size
185 if dim is not None and dim >= 0:
186 raise ValueError('dim arg must be negative.')
187 self.dim = dim
188 self._validate_and_set_dim()
189 super(plate, self).__init__()
190
191 def _validate_and_set_dim(self):
192 msg = {
193 'type': 'plate',
194 'is_observed': False,
195 'fn': identity,
196 'name': self.name,
197 'args': (None,),
198 'kwargs': {},
199 'value': None,
200 'scale': 1.0,
201 'cond_indep_stack': [],
202 }
203 apply_stack(msg)
204 cond_indep_stack = msg['cond_indep_stack']
205 occupied_dims = {f.dim for f in cond_indep_stack}
206 dim = -1
207 while True:
208 if dim not in occupied_dims:
209 break
210 dim -= 1
211 if self.dim is None:
212 self.dim = dim
213 else:
214 assert self.dim not in occupied_dims
215
216 @staticmethod
217 def _get_batch_shape(cond_indep_stack):
218 n_dims = max(-f.dim for f in cond_indep_stack)
219 batch_shape = [1] * n_dims
220 for f in cond_indep_stack:
221 batch_shape[f.dim] = f.size
222 return tuple(batch_shape)
223
224 def process_message(self, msg):
225 cond_indep_stack = msg['cond_indep_stack']
226 frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)
227 cond_indep_stack.append(frame)
228 batch_shape = self._get_batch_shape(cond_indep_stack)
229 if 'sample_shape' in msg['kwargs']:
230 batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)
231 msg['kwargs']['sample_shape'] = batch_shape
232 msg['scale'] = msg['scale'] * self.size / self.subsample_size
233
[end of numpyro/primitives.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numpyro/primitives.py b/numpyro/primitives.py
--- a/numpyro/primitives.py
+++ b/numpyro/primitives.py
@@ -60,21 +60,28 @@
return self.fn(*args, **kwargs)
-def sample(name, fn, obs=None, sample_shape=()):
+def sample(name, fn, obs=None, random_state=None, sample_shape=()):
"""
Returns a random sample from the stochastic function `fn`. This can have
additional side effects when wrapped inside effect handlers like
:class:`~numpyro.handlers.substitute`.
+ .. note::
+ By design, `sample` primitive is meant to be used inside a NumPyro model.
+ Then :class:`~numpyro.handlers.seed` handler is used to inject a random
+ state to `fn`. In those situations, `random_state` keyword will take no
+ effect.
+
:param str name: name of the sample site
:param fn: Python callable
:param numpy.ndarray obs: observed value
+ :param jax.random.PRNGKey random_state: an optional random key for `fn`.
:param sample_shape: Shape of samples to be drawn.
:return: sample from the stochastic `fn`.
"""
# if there are no active Messengers, we just draw a sample and return it as expected:
if not _PYRO_STACK:
- return fn(sample_shape=sample_shape)
+ return fn(random_state=random_state, sample_shape=sample_shape)
# Otherwise, we initialize a message...
initial_msg = {
@@ -82,7 +89,7 @@
'name': name,
'fn': fn,
'args': (),
- 'kwargs': {'sample_shape': sample_shape},
+ 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},
'value': obs,
'scale': 1.0,
'is_observed': obs is not None,
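Assuming the patch above is applied, the REPL-style call described in the issue would work by passing a key explicitly. Note that this reflects this specific diff; released NumPyro versions may expose the same idea under a different keyword (e.g. `rng_key`).

```python
from jax import random
import numpyro
import numpyro.distributions as dist

# Outside any model/handler stack, supply the PRNG key yourself.
x = numpyro.sample("x", dist.Normal(0., 1.), random_state=random.PRNGKey(0))
print(x)
```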
|
{"golden_diff": "diff --git a/numpyro/primitives.py b/numpyro/primitives.py\n--- a/numpyro/primitives.py\n+++ b/numpyro/primitives.py\n@@ -60,21 +60,28 @@\n return self.fn(*args, **kwargs)\n \n \n-def sample(name, fn, obs=None, sample_shape=()):\n+def sample(name, fn, obs=None, random_state=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n \n+ .. note::\n+ By design, `sample` primitive is meant to be used inside a NumPyro model.\n+ Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n+ state to `fn`. In those situations, `random_state` keyword will take no\n+ effect.\n+\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n+ :param jax.random.PRNGKey random_state: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n- return fn(sample_shape=sample_shape)\n+ return fn(random_state=random_state, sample_shape=sample_shape)\n \n # Otherwise, we initialize a message...\n initial_msg = {\n@@ -82,7 +89,7 @@\n 'name': name,\n 'fn': fn,\n 'args': (),\n- 'kwargs': {'sample_shape': sample_shape},\n+ 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n", "issue": "Document behavior of primitive statements in README\nHello! I'll preface this issue by stating that I'm new to numpyro, so there's a significant chance that I'm not using `numpyro.sample` correctly.\r\n\r\nWhen building models, I want to test draws from my random variables to make sure I'm passing parameters correctly, getting the kwargs correct, etc.; I'll lift a line out of a model function, [like this one](https://github.com/pyro-ppl/numpyro/blob/master/examples/baseball.py#L111), and run it in an IPython window to see if I get an error. 
It seems like I'm unable to do this by itself in an IPython console.\r\n\r\n## Minimal example\r\n\r\n```\r\nIn [1]: import numpyro\r\n\r\nIn [2]: import numpyro.distributions as d\r\n\r\nIn [3]: numpyro.__version__\r\nOut[3]: '0.2.0'\r\n\r\nIn [4]: numpyro.sample(\"x\", d.Normal(0, 1))\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-4-21ce96e72ad0> in <module>\r\n----> 1 numpyro.sample(\"x\", d.Normal(0, 1))\r\n\r\n~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/primitives.py in sample(name, fn, obs, sample_shape)\r\n 45 # if there are no active Messengers, we just draw a sample and return it as expected:\r\n 46 if not _PYRO_STACK:\r\n---> 47 return fn(sample_shape=sample_shape)\r\n 48\r\n 49 # Otherwise, we initialize a message...\r\n\r\n~/miniconda3/envs/fusion/lib/python3.7/site-packages/numpyro/distributions/distribution.py in __call__(self, *args, **kwargs)\r\n 161\r\n 162 def __call__(self, *args, **kwargs):\r\n--> 163 key = kwargs.pop('random_state')\r\n 164 sample_intermediates = kwargs.pop('sample_intermediates', False)\r\n 165 if sample_intermediates:\r\n\r\nKeyError: 'random_state'\r\n```\r\n\r\n## Thoughts\r\nI can see that in [numpyro.sample](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/primitives.py#L47), we return `fn(sample_shape=sample_shape)`, which in this case attempts to call `dist.Normal(0, 1)(sample_shape=sample_shape)`. However, looking at [distributions.py](https://github.com/pyro-ppl/numpyro/blob/master/numpyro/distributions/distribution.py#L163), it seems that `Distribution.sample` expects a `random_state` kwarg that isn't getting passed.\r\n\r\nWhen I do the following, everything is fine:\r\n\r\n```\r\nIn [5]: import jax.random as random\r\n\r\nIn [6]: d.Normal(0, 1).sample(random.PRNGKey(12))\r\n/Users/chtu8001/miniconda3/envs/fusion/lib/python3.7/site-packages/jax/lib/xla_bridge.py:114: UserWarning: No GPU/TPU found, falling back to CPU.\r\n warnings.warn('No GPU/TPU found, falling back to CPU.')\r\nOut[6]: DeviceArray(-0.5135873, dtype=float32)\r\n\r\nIn [7]: d.Normal(0, 1).sample(random.PRNGKey(12), sample_shape=(4, ))\r\nOut[7]:\r\nDeviceArray([-1.31179953, -0.70821768, 0.18907626, -1.09385514],\r\n dtype=float32)\r\n```\r\n\r\nso I expect that we can't use `numpyro.sample` outside a model (although it's not totally clear to me how that is defined, something to do with Messengers and PYRO_STACK). 
I'm wondering if this is by design and I should just use the second, working example, or if I'm misunderstanding how `numpyro.sample` should be used.\r\n\r\nThanks!\n", "before_files": [{"content": "from collections import namedtuple\nimport functools\n\nimport jax\nfrom jax import lax\n\nimport numpyro\nfrom numpyro.distributions.discrete import PRNGIdentity\n\n_PYRO_STACK = []\n\n\nCondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])\n\n\ndef apply_stack(msg):\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n if msg['value'] is None:\n if msg['type'] == 'sample':\n msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],\n sample_intermediates=True,\n **msg['kwargs'])\n else:\n msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer-1:]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, *args, **kwargs):\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef sample(name, fn, obs=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return fn(sample_shape=sample_shape)\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'sample',\n 'name': name,\n 'fn': fn,\n 'args': (),\n 'kwargs': {'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n 'intermediates': [],\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef identity(x, *args, **kwargs):\n return x\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.experimental.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.\n\n :param str name: name of site.\n :param numpy.ndarray init_value: initial value specified by the user. Note that\n the onus of using this to initialize the optimizer is on the user /\n inference algorithm, since there is no global parameter store in\n NumPyro.\n :return: value for the parameter. 
Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return init_value\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'param',\n 'name': name,\n 'fn': identity,\n 'args': (init_value,),\n 'kwargs': kwargs,\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.experimental.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + '$params'\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError('Valid value for `input_size` needed to initialize.')\n rng = numpyro.sample(name + '$rng', PRNGIdentity())\n _, nn_params = nn_init(rng, input_shape)\n param(module_key, nn_params)\n return jax.partial(nn_apply, nn_params)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. 
If `None` (default), the leftmost available dim\n is allocated.\n \"\"\"\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n self.size = size\n self.subsample_size = size if subsample_size is None else subsample_size\n if dim is not None and dim >= 0:\n raise ValueError('dim arg must be negative.')\n self.dim = dim\n self._validate_and_set_dim()\n super(plate, self).__init__()\n\n def _validate_and_set_dim(self):\n msg = {\n 'type': 'plate',\n 'is_observed': False,\n 'fn': identity,\n 'name': self.name,\n 'args': (None,),\n 'kwargs': {},\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n apply_stack(msg)\n cond_indep_stack = msg['cond_indep_stack']\n occupied_dims = {f.dim for f in cond_indep_stack}\n dim = -1\n while True:\n if dim not in occupied_dims:\n break\n dim -= 1\n if self.dim is None:\n self.dim = dim\n else:\n assert self.dim not in occupied_dims\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n batch_shape = self._get_batch_shape(cond_indep_stack)\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n msg['scale'] = msg['scale'] * self.size / self.subsample_size\n", "path": "numpyro/primitives.py"}]}
| 3,940 | 434 |
gh_patches_debug_17140
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-nlp-997
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a temperature argument to the Contrastive Sampler
Similar to our other samplers, we should add the temperature argument to the `Contrastive Sampler` along with the necessary tests.
The approach for accomplishing the same can be seen in #951.
I would like to take this up. Thanks!
</issue>
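As a minimal standalone sketch of what a `temperature` argument does to a sampling distribution (plain NumPy, values are illustrative): dividing the logits by the temperature before the softmax sharpens the distribution for temperatures below 1 and flattens it for temperatures above 1.

```python
import numpy as np

def softmax(x):
    z = np.exp(x - x.max())
    return z / z.sum()

logits = np.array([2.0, 1.0, 0.5])
for temperature in (0.5, 1.0, 2.0):
    print(temperature, softmax(logits / temperature).round(3))
# 0.5 -> close to one-hot on the largest logit; 2.0 -> closer to uniform.
```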
<code>
[start of keras_nlp/samplers/contrastive_sampler.py]
1 # Copyright 2023 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Contrastive Sampler."""
15
16 import tensorflow as tf
17 from tensorflow import keras
18 from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
19
20 from keras_nlp.api_export import keras_nlp_export
21 from keras_nlp.samplers.sampler import Sampler
22 from keras_nlp.samplers.sampler import call_args_docstring
23 from keras_nlp.utils.python_utils import format_docstring
24
25
26 @format_docstring(call_args=call_args_docstring)
27 @keras_nlp_export("keras_nlp.samplers.ContrastiveSampler")
28 class ContrastiveSampler(Sampler):
29 """Contrastive Sampler class.
30
31 This sampler implements contrastive search algorithm. In short, the sampler
32 chooses the token having the max "score" as the next token. The "score" is
33 a weighted sum between token's probability and max similarity against
34 previous tokens. By using this joint score, contrastive sampler reduces the
35 behavior of duplicating seen tokens.
36
37 Args:
38 k: int, the `k` value of top-k. Next token will be chosen from k tokens.
39 alpha: float, the weight of minus max similarity in joint score
40 computation. The larger the value of `alpha`, the score relies more
41 on the similarity than the token probability.
42 seed: int, defaults to None. The random seed.
43
44 Call Args:
45 {{call_args}}
46
47 Examples:
48 ```python
49 # Use a simple alphabet of lowercase characters to [0, 26).
50 int_lookup = {i: chr(i + ord("a")) for i in range(26)}
51 char_lookup = {v: k for k, v in int_lookup.items()}
52 batch_size, length, vocab_size = 1, 12, len(int_lookup)
53 hidden_size = 5
54 index = 5
55
56 def next(prompt, cache, index):
57 prompt_batch_size = tf.shape(prompt)[0]
58 hidden_states = tf.ones((prompt_batch_size, hidden_size))
59 # A uniform distribution over our alphabet.
60 logits = tf.ones((prompt_batch_size, vocab_size))
61 return logits, hidden_states, cache
62
63 output = keras_nlp.samplers.ContrastiveSampler()(
64 next=next,
65 prompt=tf.fill((batch_size, length), char_lookup["z"]),
66 index=index,
67 hidden_states=tf.ones([batch_size, index, hidden_size]),
68 )
69 print(["".join([int_lookup[i] for i in s]) for s in output.numpy()])
70 # >>> "zzzzzeeeeeee"
71 ```
72 """
73
74 def __init__(
75 self,
76 k=5,
77 alpha=0.6,
78 seed=None,
79 ):
80 super().__init__()
81 self.k = k
82 self.alpha = alpha
83 self.seed = seed
84
85 def __call__(
86 self,
87 next,
88 prompt,
89 cache=None,
90 index=0,
91 mask=None,
92 end_token_id=None,
93 hidden_states=None,
94 ):
95 if hidden_states is None:
96 raise ValueError(
97 "`ContrastiveSampler` requires passing a `hidden_states`, but"
98 "received `None`."
99 )
100 batch_size, max_length = tf.shape(prompt)[0], tf.shape(prompt)[1]
101 # Make sure max length and start index are the same dtype.
102 index = tf.cast(index, max_length.dtype)
103
104 def create_beams(x):
105 """Add initial beam state."""
106 x = tf.repeat(x, self.k, axis=0)
107 flat_shape = [batch_size * self.k] + x.shape.as_list()[1:]
108 return tf.reshape(x, shape=flat_shape)
109
110 def flatten_beams(x):
111 """Combine the beam dim and batch dim."""
112 flat_shape = [batch_size * self.k] + x.shape.as_list()[2:]
113 return tf.reshape(x, shape=flat_shape)
114
115 def unflatten_beams(x):
116 """Separate the beam dim and batch dim."""
117 unflat_shape = [batch_size, self.k] + x.shape.as_list()[1:]
118 return tf.reshape(x, shape=unflat_shape)
119
120 mask = tf.zeros_like(prompt, dtype=tf.bool) if mask is None else mask
121 # Compute initial logits.
122 logits, _, cache = next(prompt, cache, index)
123 # `tf.while_loop` will not accept `None` as a value for `loop_vars`.
124 cache = () if cache is None else cache
125
126 def cond(prompt, cache, index, logits, hidden_states):
127 if end_token_id is None:
128 return True
129 # Stop if all sequences have produced a *new* end_token_id.
130 end_tokens = (prompt == end_token_id) & (~mask)
131 prompt_done = tf.reduce_any(end_tokens, axis=-1)
132 return not tf.reduce_all(prompt_done)
133
134 def body(prompt, cache, index, logits, hidden_states):
135 # Compute the softmax distribution for the next token.
136 probabilities = keras.activations.softmax(logits)
137
138 # Replicate for `self.k` times to find the best token in top-k
139 # candidates.
140 prompt_beams = create_beams(prompt)
141 mask_beams = create_beams(mask)
142 hidden_states_beams = create_beams(hidden_states)
143 cache_beams = tf.nest.map_structure(create_beams, cache)
144
145 # Get top-k candidate tokens and their probabilities.
146 top_k_probabilities, top_k_indices = tf.math.top_k(
147 probabilities, k=self.k, sorted=False
148 )
149 next_token_probabilities = flatten_beams(top_k_probabilities)
150 next_token = flatten_beams(top_k_indices)
151 next_token = tf.cast(next_token, prompt.dtype)
152 next_token = tf.where(
153 mask_beams[:, index], prompt_beams[:, index], next_token
154 )
155
156 # Update the prompt with the next token.
157 next_token = next_token[:, tf.newaxis]
158 prompt_beams = dynamic_update_slice(
159 prompt_beams, next_token, [0, index]
160 )
161
162 # Compute the logits and hidden states for top-k candidate tokens.
163 next_logits, next_hidden_states_beams, cache_beams = next(
164 prompt_beams, cache_beams, index + 1
165 )
166
167 # Compute the max similarity score for top-k candidate tokens
168 # against previous tokens.
169 similarity_scores = self.similarity(
170 hidden_states_beams, next_hidden_states_beams
171 )
172 max_similarity_scores = tf.cast(
173 tf.reduce_max(similarity_scores[:, :index], axis=1),
174 dtype=next_token_probabilities.dtype,
175 )
176 if index == 0:
177 # If the index is 0, there is no previous states so we set
178 # `max_similarity_scores` the same for all beams.
179 max_similarity_scores = tf.zeros_like(max_similarity_scores)
180 # The final score of each candidate token is weighted sum of
181 # probability and similarity against previous tokens.
182 accumulated_scores = (
183 (1 - self.alpha) * next_token_probabilities
184 - self.alpha * max_similarity_scores
185 )
186             # Unflatten variables to shape [batch_size, self.k, ...] for
187 # gather purpose.
188 unflat_score = unflatten_beams(accumulated_scores)
189 unflat_prompt = unflatten_beams(prompt_beams)
190 unflat_next_logits = unflatten_beams(next_logits)
191 unflat_next_hidden_states = unflatten_beams(
192 next_hidden_states_beams
193 )
194 unflat_cache = tf.nest.map_structure(unflatten_beams, cache_beams)
195 best_token_indices = tf.math.argmax(unflat_score, axis=1)
196
197 def gather_best_token(beams):
198 return tf.gather(
199 beams,
200 best_token_indices,
201 axis=1,
202 batch_dims=1,
203 )
204
205 prompt = gather_best_token(unflat_prompt)
206 # We avoid recomputing forward pass for each token by updating the
207 # cache/hidden_states using the output, and pass the logits to
208 # next iteration step.
209 logits = gather_best_token(unflat_next_logits)
210 next_hidden_states = gather_best_token(unflat_next_hidden_states)
211 cache = tf.nest.map_structure(gather_best_token, unflat_cache)
212
213 hidden_states = dynamic_update_slice(
214 hidden_states,
215 next_hidden_states[:, tf.newaxis, :],
216 [0, index, 0],
217 )
218 return (prompt, cache, index + 1, logits, hidden_states)
219
220 prompt, _, _, _, _ = tf.while_loop(
221 cond=cond,
222 body=body,
223 loop_vars=(prompt, cache, index, logits, hidden_states),
224 maximum_iterations=(max_length - index),
225 )
226 return prompt
227
228 def similarity(self, h1, h2):
229 h2 = h2[..., tf.newaxis]
230 return tf.squeeze(tf.matmul(h1, h2), axis=-1) / (
231 tf.norm(h1, axis=-1) * tf.norm(h2, axis=-2)
232 )
233
234 def get_config(self):
235 config = super().get_config()
236 config.update(
237 {
238 "k": self.k,
239 "alpha": self.alpha,
240 "seed": self.seed,
241 }
242 )
243 return config
244
[end of keras_nlp/samplers/contrastive_sampler.py]
</code>
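To make the scoring rule in `body()` above concrete, here is a toy recomputation of the joint score with made-up numbers (NumPy only; the probabilities and similarities are illustrative):

```python
import numpy as np

alpha = 0.6
# Top-k candidate probabilities for one sequence...
next_token_probabilities = np.array([0.40, 0.35, 0.25])
# ...and each candidate's max cosine similarity to previously generated hidden states.
max_similarity_scores = np.array([0.90, 0.10, 0.20])

scores = (1 - alpha) * next_token_probabilities - alpha * max_similarity_scores
print(scores.round(2))        # [-0.38  0.08 -0.02]
print(int(scores.argmax()))   # 1: the most probable token loses because it repeats context
```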
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/keras_nlp/samplers/contrastive_sampler.py b/keras_nlp/samplers/contrastive_sampler.py
--- a/keras_nlp/samplers/contrastive_sampler.py
+++ b/keras_nlp/samplers/contrastive_sampler.py
@@ -76,8 +76,9 @@
k=5,
alpha=0.6,
seed=None,
+ **kwargs,
):
- super().__init__()
+ super().__init__(**kwargs)
self.k = k
self.alpha = alpha
self.seed = seed
@@ -133,7 +134,7 @@
def body(prompt, cache, index, logits, hidden_states):
# Compute the softmax distribution for the next token.
- probabilities = keras.activations.softmax(logits)
+ probabilities = keras.activations.softmax(logits / self.temperature)
# Replicate for `self.k` times to find the best token in top-k
# candidates.
|
{"golden_diff": "diff --git a/keras_nlp/samplers/contrastive_sampler.py b/keras_nlp/samplers/contrastive_sampler.py\n--- a/keras_nlp/samplers/contrastive_sampler.py\n+++ b/keras_nlp/samplers/contrastive_sampler.py\n@@ -76,8 +76,9 @@\n k=5,\n alpha=0.6,\n seed=None,\n+ **kwargs,\n ):\n- super().__init__()\n+ super().__init__(**kwargs)\n self.k = k\n self.alpha = alpha\n self.seed = seed\n@@ -133,7 +134,7 @@\n \n def body(prompt, cache, index, logits, hidden_states):\n # Compute the softmax distribution for the next token.\n- probabilities = keras.activations.softmax(logits)\n+ probabilities = keras.activations.softmax(logits / self.temperature)\n \n # Replicate for `self.k` times to find the best token in top-k\n # candidates.\n", "issue": "Add a temperature argument to the Contrastive Sampler \nSimilar to our other samplers, we should add the temperature argument to the `Contrastive Sampler` along with the necessary tests.\r\nThe approach for accomplishing the same can be seen in #951. \r\n\r\nI would like to take this up. Thanks!\r\n\n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Contrastive Sampler.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.samplers.sampler import Sampler\nfrom keras_nlp.samplers.sampler import call_args_docstring\nfrom keras_nlp.utils.python_utils import format_docstring\n\n\n@format_docstring(call_args=call_args_docstring)\n@keras_nlp_export(\"keras_nlp.samplers.ContrastiveSampler\")\nclass ContrastiveSampler(Sampler):\n \"\"\"Contrastive Sampler class.\n\n This sampler implements contrastive search algorithm. In short, the sampler\n chooses the token having the max \"score\" as the next token. The \"score\" is\n a weighted sum between token's probability and max similarity against\n previous tokens. By using this joint score, contrastive sampler reduces the\n behavior of duplicating seen tokens.\n\n Args:\n k: int, the `k` value of top-k. Next token will be chosen from k tokens.\n alpha: float, the weight of minus max similarity in joint score\n computation. The larger the value of `alpha`, the score relies more\n on the similarity than the token probability.\n seed: int, defaults to None. 
The random seed.\n\n Call Args:\n {{call_args}}\n\n Examples:\n ```python\n # Use a simple alphabet of lowercase characters to [0, 26).\n int_lookup = {i: chr(i + ord(\"a\")) for i in range(26)}\n char_lookup = {v: k for k, v in int_lookup.items()}\n batch_size, length, vocab_size = 1, 12, len(int_lookup)\n hidden_size = 5\n index = 5\n\n def next(prompt, cache, index):\n prompt_batch_size = tf.shape(prompt)[0]\n hidden_states = tf.ones((prompt_batch_size, hidden_size))\n # A uniform distribution over our alphabet.\n logits = tf.ones((prompt_batch_size, vocab_size))\n return logits, hidden_states, cache\n\n output = keras_nlp.samplers.ContrastiveSampler()(\n next=next,\n prompt=tf.fill((batch_size, length), char_lookup[\"z\"]),\n index=index,\n hidden_states=tf.ones([batch_size, index, hidden_size]),\n )\n print([\"\".join([int_lookup[i] for i in s]) for s in output.numpy()])\n # >>> \"zzzzzeeeeeee\"\n ```\n \"\"\"\n\n def __init__(\n self,\n k=5,\n alpha=0.6,\n seed=None,\n ):\n super().__init__()\n self.k = k\n self.alpha = alpha\n self.seed = seed\n\n def __call__(\n self,\n next,\n prompt,\n cache=None,\n index=0,\n mask=None,\n end_token_id=None,\n hidden_states=None,\n ):\n if hidden_states is None:\n raise ValueError(\n \"`ContrastiveSampler` requires passing a `hidden_states`, but\"\n \"received `None`.\"\n )\n batch_size, max_length = tf.shape(prompt)[0], tf.shape(prompt)[1]\n # Make sure max length and start index are the same dtype.\n index = tf.cast(index, max_length.dtype)\n\n def create_beams(x):\n \"\"\"Add initial beam state.\"\"\"\n x = tf.repeat(x, self.k, axis=0)\n flat_shape = [batch_size * self.k] + x.shape.as_list()[1:]\n return tf.reshape(x, shape=flat_shape)\n\n def flatten_beams(x):\n \"\"\"Combine the beam dim and batch dim.\"\"\"\n flat_shape = [batch_size * self.k] + x.shape.as_list()[2:]\n return tf.reshape(x, shape=flat_shape)\n\n def unflatten_beams(x):\n \"\"\"Separate the beam dim and batch dim.\"\"\"\n unflat_shape = [batch_size, self.k] + x.shape.as_list()[1:]\n return tf.reshape(x, shape=unflat_shape)\n\n mask = tf.zeros_like(prompt, dtype=tf.bool) if mask is None else mask\n # Compute initial logits.\n logits, _, cache = next(prompt, cache, index)\n # `tf.while_loop` will not accept `None` as a value for `loop_vars`.\n cache = () if cache is None else cache\n\n def cond(prompt, cache, index, logits, hidden_states):\n if end_token_id is None:\n return True\n # Stop if all sequences have produced a *new* end_token_id.\n end_tokens = (prompt == end_token_id) & (~mask)\n prompt_done = tf.reduce_any(end_tokens, axis=-1)\n return not tf.reduce_all(prompt_done)\n\n def body(prompt, cache, index, logits, hidden_states):\n # Compute the softmax distribution for the next token.\n probabilities = keras.activations.softmax(logits)\n\n # Replicate for `self.k` times to find the best token in top-k\n # candidates.\n prompt_beams = create_beams(prompt)\n mask_beams = create_beams(mask)\n hidden_states_beams = create_beams(hidden_states)\n cache_beams = tf.nest.map_structure(create_beams, cache)\n\n # Get top-k candidate tokens and their probabilities.\n top_k_probabilities, top_k_indices = tf.math.top_k(\n probabilities, k=self.k, sorted=False\n )\n next_token_probabilities = flatten_beams(top_k_probabilities)\n next_token = flatten_beams(top_k_indices)\n next_token = tf.cast(next_token, prompt.dtype)\n next_token = tf.where(\n mask_beams[:, index], prompt_beams[:, index], next_token\n )\n\n # Update the prompt with the next token.\n next_token = next_token[:, 
tf.newaxis]\n prompt_beams = dynamic_update_slice(\n prompt_beams, next_token, [0, index]\n )\n\n # Compute the logits and hidden states for top-k candidate tokens.\n next_logits, next_hidden_states_beams, cache_beams = next(\n prompt_beams, cache_beams, index + 1\n )\n\n # Compute the max similarity score for top-k candidate tokens\n # against previous tokens.\n similarity_scores = self.similarity(\n hidden_states_beams, next_hidden_states_beams\n )\n max_similarity_scores = tf.cast(\n tf.reduce_max(similarity_scores[:, :index], axis=1),\n dtype=next_token_probabilities.dtype,\n )\n if index == 0:\n # If the index is 0, there is no previous states so we set\n # `max_similarity_scores` the same for all beams.\n max_similarity_scores = tf.zeros_like(max_similarity_scores)\n # The final score of each candidate token is weighted sum of\n # probability and similarity against previous tokens.\n accumulated_scores = (\n (1 - self.alpha) * next_token_probabilities\n - self.alpha * max_similarity_scores\n )\n # Unflatten varibles to shape [batch_size, self.k, ...] for\n # gather purpose.\n unflat_score = unflatten_beams(accumulated_scores)\n unflat_prompt = unflatten_beams(prompt_beams)\n unflat_next_logits = unflatten_beams(next_logits)\n unflat_next_hidden_states = unflatten_beams(\n next_hidden_states_beams\n )\n unflat_cache = tf.nest.map_structure(unflatten_beams, cache_beams)\n best_token_indices = tf.math.argmax(unflat_score, axis=1)\n\n def gather_best_token(beams):\n return tf.gather(\n beams,\n best_token_indices,\n axis=1,\n batch_dims=1,\n )\n\n prompt = gather_best_token(unflat_prompt)\n # We avoid recomputing forward pass for each token by updating the\n # cache/hidden_states using the output, and pass the logits to\n # next iteration step.\n logits = gather_best_token(unflat_next_logits)\n next_hidden_states = gather_best_token(unflat_next_hidden_states)\n cache = tf.nest.map_structure(gather_best_token, unflat_cache)\n\n hidden_states = dynamic_update_slice(\n hidden_states,\n next_hidden_states[:, tf.newaxis, :],\n [0, index, 0],\n )\n return (prompt, cache, index + 1, logits, hidden_states)\n\n prompt, _, _, _, _ = tf.while_loop(\n cond=cond,\n body=body,\n loop_vars=(prompt, cache, index, logits, hidden_states),\n maximum_iterations=(max_length - index),\n )\n return prompt\n\n def similarity(self, h1, h2):\n h2 = h2[..., tf.newaxis]\n return tf.squeeze(tf.matmul(h1, h2), axis=-1) / (\n tf.norm(h1, axis=-1) * tf.norm(h2, axis=-2)\n )\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"k\": self.k,\n \"alpha\": self.alpha,\n \"seed\": self.seed,\n }\n )\n return config\n", "path": "keras_nlp/samplers/contrastive_sampler.py"}]}
| 3,344 | 219 |
gh_patches_debug_1704
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2972
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FTS doesn't perform OCR on attachments for keyword extraction
If one sends a PDF composed of images, no keywords will be extracted/indexed.
To fix that, we could do some OCR:
- https://github.com/tesseract-ocr/tesseract
or if we want to get fancy, something like
- https://github.com/JaidedAI/EasyOCR
Or the built-in integration with
- https://tika.apache.org/
There is a security argument going for that option: it's written in java (memory safe language) and would remove the need for decode2text and friends.
This is a follow-up to #2184 and #2972
</issue>
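As a rough illustration of the tesseract option mentioned above, extracting text from an image-only PDF could look something like the sketch below. Assumptions: the third-party `pdf2image` and `pytesseract` packages plus the poppler and tesseract binaries are installed; this is not Mailu code, just a sketch of the technique.

```python
from pdf2image import convert_from_path
import pytesseract

def ocr_pdf(path, lang="eng"):
    """Render each PDF page to an image and run tesseract on it."""
    pages = convert_from_path(path)  # list of PIL images, one per page
    return "\n".join(pytesseract.image_to_string(page, lang=lang) for page in pages)

print(ocr_pdf("scanned-attachment.pdf")[:200])
```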
<code>
[start of core/admin/mailu/configuration.py]
1 import os
2
3 from datetime import timedelta
4 import ipaddress
5
6 DEFAULT_CONFIG = {
7 # Specific to the admin UI
8 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
9 'BABEL_DEFAULT_LOCALE': 'en',
10 'BABEL_DEFAULT_TIMEZONE': 'UTC',
11 'BOOTSTRAP_SERVE_LOCAL': True,
12 'RATELIMIT_STORAGE_URL': '',
13 'DEBUG': False,
14 'DEBUG_PROFILER': False,
15 'DEBUG_TB_INTERCEPT_REDIRECTS': False,
16 'DEBUG_ASSETS': '',
17 'DOMAIN_REGISTRATION': False,
18 'TEMPLATES_AUTO_RELOAD': True,
19 'MEMORY_SESSIONS': False,
20 'FETCHMAIL_ENABLED': True,
21 'MAILU_VERSION': 'unknown',
22 # Database settings
23 'DB_FLAVOR': None,
24 'DB_USER': 'mailu',
25 'DB_PW': None,
26 'DB_HOST': 'database',
27 'DB_NAME': 'mailu',
28 'SQLITE_DATABASE_FILE': 'data/main.db',
29 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
30 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',
31 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
32 # Statistics management
33 'INSTANCE_ID_PATH': '/data/instance',
34 'STATS_ENDPOINT': '20.{}.stats.mailu.io',
35 # Common configuration variables
36 'SECRET_KEY': 'changeMe',
37 'DOMAIN': 'mailu.io',
38 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
39 'POSTMASTER': 'postmaster',
40 'WILDCARD_SENDERS': '',
41 'TLS_FLAVOR': 'cert',
42 'INBOUND_TLS_ENFORCE': False,
43 'DEFER_ON_TLS_ERROR': True,
44 'AUTH_RATELIMIT_IP': '5/hour',
45 'AUTH_RATELIMIT_IP_V4_MASK': 24,
46 'AUTH_RATELIMIT_IP_V6_MASK': 48,
47 'AUTH_RATELIMIT_USER': '50/day',
48 'AUTH_RATELIMIT_EXEMPTION': '',
49 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
50 'DISABLE_STATISTICS': False,
51 # Mail settings
52 'DMARC_RUA': None,
53 'DMARC_RUF': None,
54 'WELCOME': False,
55 'WELCOME_SUBJECT': 'Dummy welcome topic',
56 'WELCOME_BODY': 'Dummy welcome body',
57 'DKIM_SELECTOR': 'dkim',
58 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
59 'DEFAULT_QUOTA': 1000000000,
60 'MESSAGE_RATELIMIT': '200/day',
61 'MESSAGE_RATELIMIT_EXEMPTION': '',
62 'RECIPIENT_DELIMITER': '',
63 # Web settings
64 'SITENAME': 'Mailu',
65 'WEBSITE': 'https://mailu.io',
66 'ADMIN': 'none',
67 'WEB_ADMIN': '/admin',
68 'WEB_WEBMAIL': '/webmail',
69 'WEBMAIL': 'none',
70 'RECAPTCHA_PUBLIC_KEY': '',
71 'RECAPTCHA_PRIVATE_KEY': '',
72 'LOGO_URL': None,
73 'LOGO_BACKGROUND': None,
74 # Advanced settings
75 'API': False,
76 'WEB_API': '/api',
77 'API_TOKEN': None,
78 'LOG_LEVEL': 'INFO',
79 'SESSION_KEY_BITS': 128,
80 'SESSION_TIMEOUT': 3600,
81 'PERMANENT_SESSION_LIFETIME': 30*24*3600,
82 'SESSION_COOKIE_SECURE': None,
83 'CREDENTIAL_ROUNDS': 12,
84 'TLS_PERMISSIVE': True,
85 'TZ': 'Etc/UTC',
86 'DEFAULT_SPAM_THRESHOLD': 80,
87 'PROXY_AUTH_WHITELIST': '',
88 'PROXY_AUTH_HEADER': 'X-Auth-Email',
89 'PROXY_AUTH_CREATE': False,
90 'PROXY_AUTH_LOGOUT_URL': None,
91 'SUBNET': '192.168.203.0/24',
92 'SUBNET6': None,
93 }
94
95 class ConfigManager:
96 """ Naive configuration manager that uses environment only
97 """
98
99 DB_TEMPLATES = {
100 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
101 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
102 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
103 }
104
105 def __init__(self):
106 self.config = dict()
107
108 def __get_env(self, key, value):
109 key_file = key + "_FILE"
110 if key_file in os.environ:
111 with open(os.environ.get(key_file)) as file:
112 value_from_file = file.read()
113 return value_from_file.strip()
114 else:
115 return os.environ.get(key, value)
116
117 def __coerce_value(self, value):
118 if isinstance(value, str) and value.lower() in ('true','yes'):
119 return True
120 elif isinstance(value, str) and value.lower() in ('false', 'no'):
121 return False
122 return value
123
124 def init_app(self, app):
125 # get current app config
126 self.config.update(app.config)
127 # get environment variables
128 for key in os.environ:
129 if key.endswith('_ADDRESS'):
130 self.config[key] = os.environ[key]
131
132 self.config.update({
133 key: self.__coerce_value(self.__get_env(key, value))
134 for key, value in DEFAULT_CONFIG.items()
135 })
136
137 # automatically set the sqlalchemy string
138 if self.config['DB_FLAVOR']:
139 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
140 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
141
142 if not self.config.get('RATELIMIT_STORAGE_URL'):
143 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2'
144
145 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
146 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
147 self.config['SESSION_COOKIE_HTTPONLY'] = True
148 if self.config['SESSION_COOKIE_SECURE'] is None:
149 self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'
150 self.config['SESSION_PERMANENT'] = True
151 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
152 self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])
153 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
154 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])
155 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])
156 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
157 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])
158 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
159 self.config['HOSTNAMES'] = ','.join(hostnames)
160 self.config['HOSTNAME'] = hostnames[0]
161 self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])
162 self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)
163 try:
164 self.config['MAILU_VERSION'] = open('/version', 'r').read()
165 except FileNotFoundError:
166 pass
167
168 # update the app config
169 app.config.update(self.config)
170
[end of core/admin/mailu/configuration.py]
</code>
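The `__get_env` helper above implements the common `*_FILE` secret convention; here is a small self-contained sketch of that behaviour (the file name and secret value are made up):

```python
import os
import tempfile

# Write a fake secret to disk and point DB_PW_FILE at it.
with tempfile.NamedTemporaryFile("w", suffix=".secret", delete=False) as fh:
    fh.write("s3cret\n")
os.environ["DB_PW_FILE"] = fh.name

key = "DB_PW"
key_file = key + "_FILE"
if key_file in os.environ:                      # same branch as __get_env
    with open(os.environ[key_file]) as secret:
        value = secret.read().strip()
else:
    value = os.environ.get(key, None)
print(value)  # -> s3cret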
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -75,6 +75,8 @@
'API': False,
'WEB_API': '/api',
'API_TOKEN': None,
+ 'FULL_TEXT_SEARCH': 'en',
+ 'FULL_TEXT_SEARCH_ATTACHMENTS': False,
'LOG_LEVEL': 'INFO',
'SESSION_KEY_BITS': 128,
'SESSION_TIMEOUT': 3600,
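Given the two new defaults introduced by this diff, they would be toggled the same way as the other settings, i.e. via environment variables that `ConfigManager` coerces. A standalone sketch of that coercion, mirroring `__coerce_value` from the listing above (the example values are illustrative):

```python
import os

def coerce(value):
    # Same rule as ConfigManager.__coerce_value in the listing above.
    if isinstance(value, str) and value.lower() in ("true", "yes"):
        return True
    if isinstance(value, str) and value.lower() in ("false", "no"):
        return False
    return value

os.environ.setdefault("FULL_TEXT_SEARCH", "en")
os.environ.setdefault("FULL_TEXT_SEARCH_ATTACHMENTS", "true")
print(coerce(os.environ["FULL_TEXT_SEARCH"]))              # 'en'
print(coerce(os.environ["FULL_TEXT_SEARCH_ATTACHMENTS"]))  # True
```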
|
{"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -75,6 +75,8 @@\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n+ 'FULL_TEXT_SEARCH': 'en',\n+ 'FULL_TEXT_SEARCH_ATTACHMENTS': False,\n 'LOG_LEVEL': 'INFO',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n", "issue": "FTS doesn't perform OCR on attachments for keyword extraction\nIf one send a PDF composed of images, no keywords will be extracted/indexed.\r\n\r\nTo fix that, we could do some OCR:\r\n- https://github.com/tesseract-ocr/tesseract\r\n\r\nor if we want to get fancy, something like\r\n- https://github.com/JaidedAI/EasyOCR\r\n\r\nOr the built-in integration with\r\n- https://tika.apache.org/\r\nThere is a security argument going for that option: it's written in java (memory safe language) and would remove the need for decode2text and friends.\r\n\r\nThis is a follow-up to #2184 and #2972\n", "before_files": [{"content": "import os\n\nfrom datetime import timedelta\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'DEBUG': False,\n 'DEBUG_PROFILER': False,\n 'DEBUG_TB_INTERCEPT_REDIRECTS': False,\n 'DEBUG_ASSETS': '',\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n 'MAILU_VERSION': 'unknown',\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE': 'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '20.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '5/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 48,\n 'AUTH_RATELIMIT_USER': '50/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN': 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n 'LOG_LEVEL': 'INFO',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': None,\n 'CREDENTIAL_ROUNDS': 12,\n 'TLS_PERMISSIVE': True,\n 'TZ': 'Etc/UTC',\n 
'DEFAULT_SPAM_THRESHOLD': 80,\n 'PROXY_AUTH_WHITELIST': '',\n 'PROXY_AUTH_HEADER': 'X-Auth-Email',\n 'PROXY_AUTH_CREATE': False,\n 'PROXY_AUTH_LOGOUT_URL': None,\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n }\n\n def __init__(self):\n self.config = dict()\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n for key in os.environ:\n if key.endswith('_ADDRESS'):\n self.config[key] = os.environ[key]\n\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n if not self.config.get('RATELIMIT_STORAGE_URL'):\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n if self.config['SESSION_COOKIE_SECURE'] is None:\n self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n try:\n self.config['MAILU_VERSION'] = open('/version', 'r').read()\n except FileNotFoundError:\n pass\n\n # update the app config\n app.config.update(self.config)\n", "path": "core/admin/mailu/configuration.py"}]}
| 2,833 | 120 |
gh_patches_debug_4750
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-1567
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DefaultWorker crashes in non-deterministic mode
```sh
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/rjulian/venv/lib/python3.5/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "examples/torch/sac_metaworldv2_test.py", line 102, in get_args
sac_metaworldv2_test()
File "/home/rjulian/garage/src/garage/experiment/experiment.py", line 553, in __call__
result = self.function(ctxt, **kwargs)
File "examples/torch/sac_metaworldv2_test.py", line 100, in sac_metaworldv2_test
runner.setup(algo=sac, env=env, sampler_cls=LocalSampler)
File "/home/rjulian/garage/src/garage/experiment/local_runner.py", line 268, in setup
worker_args=worker_args)
File "/home/rjulian/garage/src/garage/experiment/local_runner.py", line 215, in make_sampler
envs=self._env)
File "/home/rjulian/garage/src/garage/sampler/local_sampler.py", line 66, in from_worker_factory
return cls(worker_factory, agents, envs)
File "/home/rjulian/garage/src/garage/sampler/local_sampler.py", line 38, in __init__
worker_factory(i) for i in range(worker_factory.n_workers)
File "/home/rjulian/garage/src/garage/sampler/local_sampler.py", line 38, in <listcomp>
worker_factory(i) for i in range(worker_factory.n_workers)
File "/home/rjulian/garage/src/garage/sampler/worker_factory.py", line 109, in __call__
**self._worker_args)
File "/home/rjulian/garage/src/garage/sampler/default_worker.py", line 51, in __init__
self.worker_init()
File "/home/rjulian/garage/src/garage/sampler/default_worker.py", line 55, in worker_init
deterministic.set_seed(self._seed + self._worker_number)
TypeError: unsupported operand type(s) for +: 'NoneType' and 'int'
```
</issue>
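The traceback boils down to `self._seed` being `None` when no seed was configured. A minimal standalone sketch of a guard that avoids the `NoneType + int` error, mirroring the failing `worker_init` line (function signature here is simplified for illustration):

```python
from garage.experiment import deterministic

def worker_init(seed, worker_number):
    # Only derive a per-worker seed when a base seed was actually supplied;
    # seed=None means the sampler runs in non-deterministic mode.
    if seed is not None:
        deterministic.set_seed(seed + worker_number)

worker_init(None, 3)  # no-op instead of TypeError
worker_init(42, 3)    # seeds RNGs with 45
```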
<code>
[start of src/garage/sampler/default_worker.py]
1 """Default Worker class."""
2 from collections import defaultdict
3
4 import gym
5 import numpy as np
6
7 from garage import TrajectoryBatch
8 from garage.experiment import deterministic
9 from garage.sampler.env_update import EnvUpdate
10 from garage.sampler.worker import Worker
11
12
13 class DefaultWorker(Worker):
14 """Initialize a worker.
15
16 Args:
17         seed(int): The seed to use to initialize random number generators.
18 max_path_length(int or float): The maximum length paths which will
19 be sampled. Can be (floating point) infinity.
20 worker_number(int): The number of the worker where this update is
21 occurring. This argument is used to set a different seed for each
22 worker.
23
24 Attributes:
25 agent(Policy or None): The worker's agent.
26 env(gym.Env or None): The worker's environment.
27
28 """
29
30 def __init__(
31 self,
32 *, # Require passing by keyword, since everything's an int.
33 seed,
34 max_path_length,
35 worker_number):
36 super().__init__(seed=seed,
37 max_path_length=max_path_length,
38 worker_number=worker_number)
39 self.agent = None
40 self.env = None
41 self._observations = []
42 self._last_observations = []
43 self._actions = []
44 self._rewards = []
45 self._terminals = []
46 self._lengths = []
47 self._agent_infos = defaultdict(list)
48 self._env_infos = defaultdict(list)
49 self._prev_obs = None
50 self._path_length = 0
51 self.worker_init()
52
53 def worker_init(self):
54 """Initialize a worker."""
55 deterministic.set_seed(self._seed + self._worker_number)
56
57 def update_agent(self, agent_update):
58 """Update an agent, assuming it implements garage.Policy.
59
60 Args:
61 agent_update (np.ndarray or dict or garage.Policy): If a
62 tuple, dict, or np.ndarray, these should be parameters to
63 agent, which should have been generated by calling
64 `policy.get_param_values`. Alternatively, a policy itself. Note
65 that other implementations of `Worker` may take different types
66 for this parameter.
67
68 """
69 if isinstance(agent_update, (dict, tuple, np.ndarray)):
70 self.agent.set_param_values(agent_update)
71 elif agent_update is not None:
72 self.agent = agent_update
73
74 def update_env(self, env_update):
75 """Use any non-None env_update as a new environment.
76
77 A simple env update function. If env_update is not None, it should be
78 the complete new environment.
79
80 This allows changing environments by passing the new environment as
81 `env_update` into `obtain_samples`.
82
83 Args:
84 env_update(gym.Env or EnvUpdate or None): The environment to
85 replace the existing env with. Note that other implementations
86 of `Worker` may take different types for this parameter.
87
88 Raises:
89 TypeError: If env_update is not one of the documented types.
90
91 """
92 if env_update is not None:
93 if isinstance(env_update, EnvUpdate):
94 self.env = env_update(self.env)
95 elif isinstance(env_update, gym.Env):
96 if self.env is not None:
97 self.env.close()
98 self.env = env_update
99 else:
100                 raise TypeError('Unknown environment update type.')
101
102 def start_rollout(self):
103 """Begin a new rollout."""
104 self._path_length = 0
105 self._prev_obs = self.env.reset()
106 self.agent.reset()
107
108 def step_rollout(self):
109 """Take a single time-step in the current rollout.
110
111 Returns:
112 bool: True iff the path is done, either due to the environment
113 indicating termination of due to reaching `max_path_length`.
114
115 """
116 if self._path_length < self._max_path_length:
117 a, agent_info = self.agent.get_action(self._prev_obs)
118 next_o, r, d, env_info = self.env.step(a)
119 self._observations.append(self._prev_obs)
120 self._rewards.append(r)
121 self._actions.append(a)
122 for k, v in agent_info.items():
123 self._agent_infos[k].append(v)
124 for k, v in env_info.items():
125 self._env_infos[k].append(v)
126 self._path_length += 1
127 self._terminals.append(d)
128 if not d:
129 self._prev_obs = next_o
130 return False
131 self._lengths.append(self._path_length)
132 self._last_observations.append(self._prev_obs)
133 return True
134
135 def collect_rollout(self):
136 """Collect the current rollout, clearing the internal buffer.
137
138 Returns:
139 garage.TrajectoryBatch: A batch of the trajectories completed since
140 the last call to collect_rollout().
141
142 """
143 observations = self._observations
144 self._observations = []
145 last_observations = self._last_observations
146 self._last_observations = []
147 actions = self._actions
148 self._actions = []
149 rewards = self._rewards
150 self._rewards = []
151 terminals = self._terminals
152 self._terminals = []
153 env_infos = self._env_infos
154 self._env_infos = defaultdict(list)
155 agent_infos = self._agent_infos
156 self._agent_infos = defaultdict(list)
157 for k, v in agent_infos.items():
158 agent_infos[k] = np.asarray(v)
159 for k, v in env_infos.items():
160 env_infos[k] = np.asarray(v)
161 lengths = self._lengths
162 self._lengths = []
163 return TrajectoryBatch(self.env.spec, np.asarray(observations),
164 np.asarray(last_observations),
165 np.asarray(actions), np.asarray(rewards),
166 np.asarray(terminals), dict(env_infos),
167 dict(agent_infos), np.asarray(lengths,
168 dtype='i'))
169
170 def rollout(self):
171 """Sample a single rollout of the agent in the environment.
172
173 Returns:
174 garage.TrajectoryBatch: The collected trajectory.
175
176 """
177 self.start_rollout()
178 while not self.step_rollout():
179 pass
180 return self.collect_rollout()
181
182 def shutdown(self):
183 """Close the worker's environment."""
184 self.env.close()
185
[end of src/garage/sampler/default_worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/garage/sampler/default_worker.py b/src/garage/sampler/default_worker.py
--- a/src/garage/sampler/default_worker.py
+++ b/src/garage/sampler/default_worker.py
@@ -52,7 +52,8 @@
def worker_init(self):
"""Initialize a worker."""
- deterministic.set_seed(self._seed + self._worker_number)
+ if self._seed is not None:
+ deterministic.set_seed(self._seed + self._worker_number)
def update_agent(self, agent_update):
"""Update an agent, assuming it implements garage.Policy.
|
{"golden_diff": "diff --git a/src/garage/sampler/default_worker.py b/src/garage/sampler/default_worker.py\n--- a/src/garage/sampler/default_worker.py\n+++ b/src/garage/sampler/default_worker.py\n@@ -52,7 +52,8 @@\n \n def worker_init(self):\n \"\"\"Initialize a worker.\"\"\"\n- deterministic.set_seed(self._seed + self._worker_number)\n+ if self._seed is not None:\n+ deterministic.set_seed(self._seed + self._worker_number)\n \n def update_agent(self, agent_update):\n \"\"\"Update an agent, assuming it implements garage.Policy.\n", "issue": "DefaultWorker crashes in non-deterministic mode\n```sh\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/rjulian/venv/lib/python3.5/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"examples/torch/sac_metaworldv2_test.py\", line 102, in get_args\r\n sac_metaworldv2_test()\r\n File \"/home/rjulian/garage/src/garage/experiment/experiment.py\", line 553, in __call__\r\n result = self.function(ctxt, **kwargs)\r\n File \"examples/torch/sac_metaworldv2_test.py\", line 100, in sac_metaworldv2_test\r\n runner.setup(algo=sac, env=env, sampler_cls=LocalSampler)\r\n File \"/home/rjulian/garage/src/garage/experiment/local_runner.py\", line 268, in setup\r\n worker_args=worker_args)\r\n File \"/home/rjulian/garage/src/garage/experiment/local_runner.py\", line 215, in make_sampler\r\n envs=self._env)\r\n File \"/home/rjulian/garage/src/garage/sampler/local_sampler.py\", line 66, in from_worker_factory\r\n return cls(worker_factory, agents, envs)\r\n File \"/home/rjulian/garage/src/garage/sampler/local_sampler.py\", line 38, in __init__\r\n worker_factory(i) for i in range(worker_factory.n_workers)\r\n File \"/home/rjulian/garage/src/garage/sampler/local_sampler.py\", line 38, in <listcomp>\r\n worker_factory(i) for i in range(worker_factory.n_workers)\r\n File \"/home/rjulian/garage/src/garage/sampler/worker_factory.py\", line 109, in __call__\r\n **self._worker_args)\r\n File \"/home/rjulian/garage/src/garage/sampler/default_worker.py\", line 51, in __init__\r\n self.worker_init()\r\n File \"/home/rjulian/garage/src/garage/sampler/default_worker.py\", line 55, in worker_init\r\n deterministic.set_seed(self._seed + self._worker_number)\r\nTypeError: unsupported operand type(s) for +: 'NoneType' and 'int'\r\n```\n", "before_files": [{"content": "\"\"\"Default Worker class.\"\"\"\nfrom collections import defaultdict\n\nimport gym\nimport numpy as np\n\nfrom garage import TrajectoryBatch\nfrom garage.experiment import deterministic\nfrom garage.sampler.env_update import EnvUpdate\nfrom garage.sampler.worker import Worker\n\n\nclass DefaultWorker(Worker):\n \"\"\"Initialize a worker.\n\n Args:\n seed(int): The seed to use to intialize random number generators.\n max_path_length(int or float): The maximum length paths which will\n be sampled. Can be (floating point) infinity.\n worker_number(int): The number of the worker where this update is\n occurring. 
This argument is used to set a different seed for each\n worker.\n\n Attributes:\n agent(Policy or None): The worker's agent.\n env(gym.Env or None): The worker's environment.\n\n \"\"\"\n\n def __init__(\n self,\n *, # Require passing by keyword, since everything's an int.\n seed,\n max_path_length,\n worker_number):\n super().__init__(seed=seed,\n max_path_length=max_path_length,\n worker_number=worker_number)\n self.agent = None\n self.env = None\n self._observations = []\n self._last_observations = []\n self._actions = []\n self._rewards = []\n self._terminals = []\n self._lengths = []\n self._agent_infos = defaultdict(list)\n self._env_infos = defaultdict(list)\n self._prev_obs = None\n self._path_length = 0\n self.worker_init()\n\n def worker_init(self):\n \"\"\"Initialize a worker.\"\"\"\n deterministic.set_seed(self._seed + self._worker_number)\n\n def update_agent(self, agent_update):\n \"\"\"Update an agent, assuming it implements garage.Policy.\n\n Args:\n agent_update (np.ndarray or dict or garage.Policy): If a\n tuple, dict, or np.ndarray, these should be parameters to\n agent, which should have been generated by calling\n `policy.get_param_values`. Alternatively, a policy itself. Note\n that other implementations of `Worker` may take different types\n for this parameter.\n\n \"\"\"\n if isinstance(agent_update, (dict, tuple, np.ndarray)):\n self.agent.set_param_values(agent_update)\n elif agent_update is not None:\n self.agent = agent_update\n\n def update_env(self, env_update):\n \"\"\"Use any non-None env_update as a new environment.\n\n A simple env update function. If env_update is not None, it should be\n the complete new environment.\n\n This allows changing environments by passing the new environment as\n `env_update` into `obtain_samples`.\n\n Args:\n env_update(gym.Env or EnvUpdate or None): The environment to\n replace the existing env with. 
Note that other implementations\n of `Worker` may take different types for this parameter.\n\n Raises:\n TypeError: If env_update is not one of the documented types.\n\n \"\"\"\n if env_update is not None:\n if isinstance(env_update, EnvUpdate):\n self.env = env_update(self.env)\n elif isinstance(env_update, gym.Env):\n if self.env is not None:\n self.env.close()\n self.env = env_update\n else:\n raise TypeError('Uknown environment update type.')\n\n def start_rollout(self):\n \"\"\"Begin a new rollout.\"\"\"\n self._path_length = 0\n self._prev_obs = self.env.reset()\n self.agent.reset()\n\n def step_rollout(self):\n \"\"\"Take a single time-step in the current rollout.\n\n Returns:\n bool: True iff the path is done, either due to the environment\n indicating termination of due to reaching `max_path_length`.\n\n \"\"\"\n if self._path_length < self._max_path_length:\n a, agent_info = self.agent.get_action(self._prev_obs)\n next_o, r, d, env_info = self.env.step(a)\n self._observations.append(self._prev_obs)\n self._rewards.append(r)\n self._actions.append(a)\n for k, v in agent_info.items():\n self._agent_infos[k].append(v)\n for k, v in env_info.items():\n self._env_infos[k].append(v)\n self._path_length += 1\n self._terminals.append(d)\n if not d:\n self._prev_obs = next_o\n return False\n self._lengths.append(self._path_length)\n self._last_observations.append(self._prev_obs)\n return True\n\n def collect_rollout(self):\n \"\"\"Collect the current rollout, clearing the internal buffer.\n\n Returns:\n garage.TrajectoryBatch: A batch of the trajectories completed since\n the last call to collect_rollout().\n\n \"\"\"\n observations = self._observations\n self._observations = []\n last_observations = self._last_observations\n self._last_observations = []\n actions = self._actions\n self._actions = []\n rewards = self._rewards\n self._rewards = []\n terminals = self._terminals\n self._terminals = []\n env_infos = self._env_infos\n self._env_infos = defaultdict(list)\n agent_infos = self._agent_infos\n self._agent_infos = defaultdict(list)\n for k, v in agent_infos.items():\n agent_infos[k] = np.asarray(v)\n for k, v in env_infos.items():\n env_infos[k] = np.asarray(v)\n lengths = self._lengths\n self._lengths = []\n return TrajectoryBatch(self.env.spec, np.asarray(observations),\n np.asarray(last_observations),\n np.asarray(actions), np.asarray(rewards),\n np.asarray(terminals), dict(env_infos),\n dict(agent_infos), np.asarray(lengths,\n dtype='i'))\n\n def rollout(self):\n \"\"\"Sample a single rollout of the agent in the environment.\n\n Returns:\n garage.TrajectoryBatch: The collected trajectory.\n\n \"\"\"\n self.start_rollout()\n while not self.step_rollout():\n pass\n return self.collect_rollout()\n\n def shutdown(self):\n \"\"\"Close the worker's environment.\"\"\"\n self.env.close()\n", "path": "src/garage/sampler/default_worker.py"}]}
| 2,964 | 134 |
gh_patches_debug_11199
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1937
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When attempting to generate a Finnish ssn of a specific age the provider crashes
* Faker version: 19.11.0
* OS: Windows 10
When attempting to generate a Finnish ssn of a certain age the provider crashes because ``datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))`` is always trying to generate a time within range. The ssn provider should handle situations where min_age and max_age are the same.
For example:
```
if min_age == max_age:
age = datetime.timedelta(days=min_age * 365)
else:
age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
```
### Steps to reproduce
1. Create a faker with fi_FI locale: ``fake = Faker('fi_FI')``
2. Call the ssn provider with the same minimum and maximum age: ``fake.ssn(min_age=50, max_age=50, artificial=True)``
### Expected behavior
The faker generates an artificial Finnish ssn that is 50 years old.
### Actual behavior
``ValueError: empty range for randrange() (18250, 18250, 0)``
</issue>
<code>
[start of faker/providers/ssn/fi_FI/__init__.py]
1 import datetime
2
3 from .. import Provider as SsnProvider
4
5
6 class Provider(SsnProvider):
7 def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:
8 """
9 Returns 11 character Finnish personal identity code (Henkilötunnus,
10 HETU, Swedish: Personbeteckning). This function assigns random
11 gender to person.
12
13 HETU consists of eleven characters of the form DDMMYYCZZZQ, where
14 DDMMYY is the date of birth, C the century sign, ZZZ the individual
15 number and Q the control character (checksum). The sign for the
16 century is either + (1800–1899), - (1900–1999), or A (2000–2099).
17 The individual number ZZZ is odd for males and even for females.
18 For people born in Finland its range is 002-899
19 (larger numbers may be used in special cases).
20 An example of a valid code is 311280-888Y.
21
22 https://en.wikipedia.org/wiki/National_identification_number#Finland
23 """
24
25 def _checksum(hetu):
26 checksum_characters = "0123456789ABCDEFHJKLMNPRSTUVWXY"
27 return checksum_characters[int(hetu) % 31]
28
29 age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
30 birthday = datetime.date.today() - age
31 hetu_date = "%02d%02d%s" % (
32 birthday.day,
33 birthday.month,
34 str(birthday.year)[-2:],
35 )
36 range = (900, 999) if artificial is True else (2, 899)
37 suffix = str(self.generator.random.randrange(*range)).zfill(3)
38 checksum = _checksum(hetu_date + suffix)
39 separator = self._get_century_code(birthday.year)
40 hetu = "".join([hetu_date, separator, suffix, checksum])
41 return hetu
42
43 @staticmethod
44 def _get_century_code(year: int) -> str:
45 """Returns the century code for a given year"""
46 if 2000 <= year < 3000:
47 separator = "A"
48 elif 1900 <= year < 2000:
49 separator = "-"
50 elif 1800 <= year < 1900:
51 separator = "+"
52 else:
53 raise ValueError("Finnish SSN do not support people born before the year 1800 or after the year 2999")
54 return separator
55
56 vat_id_formats = ("FI########",)
57
58 def vat_id(self) -> str:
59 """
60 http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
61 :return: A random Finnish VAT ID
62 """
63 return self.bothify(self.random_element(self.vat_id_formats))
64
[end of faker/providers/ssn/fi_FI/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/providers/ssn/fi_FI/__init__.py b/faker/providers/ssn/fi_FI/__init__.py
--- a/faker/providers/ssn/fi_FI/__init__.py
+++ b/faker/providers/ssn/fi_FI/__init__.py
@@ -26,7 +26,10 @@
checksum_characters = "0123456789ABCDEFHJKLMNPRSTUVWXY"
return checksum_characters[int(hetu) % 31]
- age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
+ if min_age == max_age:
+ age = datetime.timedelta(days=min_age * 365)
+ else:
+ age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))
birthday = datetime.date.today() - age
hetu_date = "%02d%02d%s" % (
birthday.day,
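Assuming the patch above is applied, the reproduction from the issue report should now pass; a minimal sanity check:

```python
from faker import Faker

fake = Faker("fi_FI")
# Previously: ValueError: empty range for randrange() (18250, 18250, 0).
# With the min_age == max_age branch, a valid 11-character HETU is returned
# (artificial codes use the 900-999 individual-number range).
print(fake.ssn(min_age=50, max_age=50, artificial=True))
```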
|
{"golden_diff": "diff --git a/faker/providers/ssn/fi_FI/__init__.py b/faker/providers/ssn/fi_FI/__init__.py\n--- a/faker/providers/ssn/fi_FI/__init__.py\n+++ b/faker/providers/ssn/fi_FI/__init__.py\n@@ -26,7 +26,10 @@\n checksum_characters = \"0123456789ABCDEFHJKLMNPRSTUVWXY\"\n return checksum_characters[int(hetu) % 31]\n \n- age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n+ if min_age == max_age:\n+ age = datetime.timedelta(days=min_age * 365)\n+ else:\n+ age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = datetime.date.today() - age\n hetu_date = \"%02d%02d%s\" % (\n birthday.day,\n", "issue": "When attempting to generate a Finnish ssn of a specific age the provider crashes\n* Faker version: 19.11.0\r\n* OS: Windows 10\r\n\r\nWhen attempting to generate a Finnish ssn of a certain age the provider crashes because ``datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))`` is always trying to generate a time within range. The ssn provider should handle situations where min_age and max_age are the same.\r\nFor example:\r\n```\r\nif min_age == max_age:\r\n age = datetime.timedelta(days=min_age * 365)\r\nelse:\r\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. Create a faker with fi_FI locale: ``fake = Faker('fi_FI')``\r\n2. Call the ssn provider with the same minimum and maximum age: ``fake.ssn(min_age=50, max_age=50, artificial=True)``\r\n\r\n### Expected behavior\r\n\r\nThe faker generates an artificial Finnish ssn that is 50 years old. \r\n\r\n### Actual behavior\r\n\r\n``ValueError: empty range for randrange() (18250, 18250, 0)``\r\n\n", "before_files": [{"content": "import datetime\n\nfrom .. import Provider as SsnProvider\n\n\nclass Provider(SsnProvider):\n def ssn(self, min_age: int = 0, max_age: int = 105, artificial: bool = False) -> str:\n \"\"\"\n Returns 11 character Finnish personal identity code (Henkil\u00f6tunnus,\n HETU, Swedish: Personbeteckning). This function assigns random\n gender to person.\n\n HETU consists of eleven characters of the form DDMMYYCZZZQ, where\n DDMMYY is the date of birth, C the century sign, ZZZ the individual\n number and Q the control character (checksum). 
The sign for the\n century is either + (1800\u20131899), - (1900\u20131999), or A (2000\u20132099).\n The individual number ZZZ is odd for males and even for females.\n For people born in Finland its range is 002-899\n (larger numbers may be used in special cases).\n An example of a valid code is 311280-888Y.\n\n https://en.wikipedia.org/wiki/National_identification_number#Finland\n \"\"\"\n\n def _checksum(hetu):\n checksum_characters = \"0123456789ABCDEFHJKLMNPRSTUVWXY\"\n return checksum_characters[int(hetu) % 31]\n\n age = datetime.timedelta(days=self.generator.random.randrange(min_age * 365, max_age * 365))\n birthday = datetime.date.today() - age\n hetu_date = \"%02d%02d%s\" % (\n birthday.day,\n birthday.month,\n str(birthday.year)[-2:],\n )\n range = (900, 999) if artificial is True else (2, 899)\n suffix = str(self.generator.random.randrange(*range)).zfill(3)\n checksum = _checksum(hetu_date + suffix)\n separator = self._get_century_code(birthday.year)\n hetu = \"\".join([hetu_date, separator, suffix, checksum])\n return hetu\n\n @staticmethod\n def _get_century_code(year: int) -> str:\n \"\"\"Returns the century code for a given year\"\"\"\n if 2000 <= year < 3000:\n separator = \"A\"\n elif 1900 <= year < 2000:\n separator = \"-\"\n elif 1800 <= year < 1900:\n separator = \"+\"\n else:\n raise ValueError(\"Finnish SSN do not support people born before the year 1800 or after the year 2999\")\n return separator\n\n vat_id_formats = (\"FI########\",)\n\n def vat_id(self) -> str:\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Finnish VAT ID\n \"\"\"\n return self.bothify(self.random_element(self.vat_id_formats))\n", "path": "faker/providers/ssn/fi_FI/__init__.py"}]}
| 1,639 | 230 |
gh_patches_debug_25749
|
rasdani/github-patches
|
git_diff
|
kymatio__kymatio-822
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scattering 2D doesn't work when using 2^J == image size (TensorFlow backend)
Similar issues have been reported before (#284, #363) and also fixed (#412) for Torch backend. However, still not working in the tensorflow backend:
test:
```python
import numpy as np
from kymatio.tensorflow import Scattering2D
scattering = Scattering2D(J=5, shape=(32, 32))
test_im = np.ones((1,1,32,32))
test = scattering.scattering(test_im)
```
Gives error:
```
7 scattering = Scattering2D(J=5, shape=(32, 32))
8 test_im = np.ones((1,1,32,32))
----> 9 test = scattering.scattering(test_im)
~/.local/lib/python3.9/site-packages/kymatio/scattering2d/frontend/tensorflow_frontend.py in scattering(self, input)
48 input = tf.reshape(input, tf.concat(((-1,), signal_shape), 0))
49
---> 50 S = scattering2d(input, self.pad, self.unpad, self.backend, self.J, self.L, self.phi, self.psi,
51 self.max_order, self.out_type)
52
~/.local/lib/python3.9/site-packages/kymatio/scattering2d/core/scattering2d.py in scattering2d(x, pad, unpad, backend, J, L, phi, psi, max_order, out_type)
13 out_S_0, out_S_1, out_S_2 = [], [], []
14
---> 15 U_r = pad(x)
16
17 U_0_c = fft(U_r, 'C2C')
~/.local/lib/python3.9/site-packages/kymatio/scattering2d/backend/tensorflow_backend.py in __call__(self, x)
27 paddings = [[0, 0]] * len(x.shape[:-2])
28 paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]
---> 29 return tf.cast(tf.pad(x, paddings, mode="REFLECT"), tf.complex64)
30
31 def unpad(in_):
~/.local/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
~/.local/lib/python3.9/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
56 try:
57 ctx.ensure_initialized()
---> 58 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
59 inputs, attrs, num_outputs)
60 except core._NotOkStatusException as e:
InvalidArgumentError: paddings must be less than the dimension size: 32, 32 not less than 32 [Op:MirrorPad]
```
(speculation) So possibly problems with the order of the padding being different in tensorflow from torch.
Should also probably include some tests for these types of problems, like the tests implemented for Torch in #346
</issue>
<code>
[start of kymatio/scattering2d/backend/tensorflow_backend.py]
1 import tensorflow as tf
2
3 from ...backend.tensorflow_backend import TensorFlowBackend
4
5
6 class Pad(object):
7 def __init__(self, pad_size, input_size):
8 """
9 Padding which allows to simultaneously pad in a reflection fashion
10 and map to complex.
11 Parameters
12 ----------
13 pad_size : list of 4 integers
14 size of padding to apply.
15 input_size : list of 2 integers
16 size of the original signal
17 """
18 self.pad_size = pad_size
19
20 def __call__(self, x):
21 paddings = [[0, 0]] * len(x.shape[:-2])
22 paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]
23 return tf.pad(x, paddings, mode="REFLECT")
24
25
26 class TensorFlowBackend2D(TensorFlowBackend):
27 Pad = Pad
28
29 @staticmethod
30 def unpad(in_):
31 """
32 Slices the input tensor at indices between 1::-1
33 Parameters
34 ----------
35 in_ : tensor_like
36 input tensor
37 Returns
38 -------
39 in_[..., 1:-1, 1:-1]
40 """
41 return in_[..., 1:-1, 1:-1]
42
43
44 @classmethod
45 def rfft(cls, x):
46 cls.real_check(x)
47 return tf.signal.fft2d(tf.cast(x, tf.complex64), name='rfft2d')
48
49 @classmethod
50 def irfft(cls, x):
51 cls.complex_check(x)
52 return tf.math.real(tf.signal.ifft2d(x, name='irfft2d'))
53
54
55 @classmethod
56 def ifft(cls, x):
57 cls.complex_check(x)
58 return tf.signal.ifft2d(x, name='ifft2d')
59
60 @classmethod
61 def subsample_fourier(cls, x, k):
62 """ Subsampling of a 2D image performed in the Fourier domain.
63
64 Subsampling in the spatial domain amounts to periodization
65 in the Fourier domain, hence the formula.
66
67 Parameters
68 ----------
69 x : tensor_like
70 input tensor with at least three dimensions.
71 k : int
72 integer such that x is subsampled by k along the spatial variables.
73
74 Returns
75 -------
76 out : tensor_like
77 Tensor such that its Fourier transform is the Fourier
78 transform of a subsampled version of x, i.e. in
79 F^{-1}(out)[u1, u2] = F^{-1}(x)[u1 * k, u2 * k]
80
81 """
82 cls.complex_check(x)
83
84 y = tf.reshape(x, (-1, k, x.shape[1] // k, k, x.shape[2] // k))
85
86 out = tf.reduce_mean(y, axis=(1, 3))
87 return out
88
89
90 backend = TensorFlowBackend2D
91
[end of kymatio/scattering2d/backend/tensorflow_backend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kymatio/scattering2d/backend/tensorflow_backend.py b/kymatio/scattering2d/backend/tensorflow_backend.py
--- a/kymatio/scattering2d/backend/tensorflow_backend.py
+++ b/kymatio/scattering2d/backend/tensorflow_backend.py
@@ -16,11 +16,34 @@
size of the original signal
"""
self.pad_size = pad_size
+ self.input_size = input_size
def __call__(self, x):
+ pad_size = list(self.pad_size)
+
+ # Clone to avoid passing on modifications.
+ new_pad_size = list(pad_size)
+
+ # This handles the case where the padding is equal to the image size.
+ if pad_size[0] == self.input_size[0]:
+ new_pad_size[0] -= 1
+ new_pad_size[1] -= 1
+ if pad_size[2] == self.input_size[1]:
+ new_pad_size[2] -= 1
+ new_pad_size[3] -= 1
+
paddings = [[0, 0]] * len(x.shape[:-2])
- paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]
- return tf.pad(x, paddings, mode="REFLECT")
+ paddings += [[new_pad_size[0], new_pad_size[1]], [new_pad_size[2], new_pad_size[3]]]
+
+ x_padded = tf.pad(x, paddings, mode="REFLECT")
+
+ # Again, special handling for when padding is the same as image size.
+ if pad_size[0] == self.input_size[0]:
+ x_padded = tf.concat([tf.expand_dims(x_padded[..., 1, :], axis=-2), x_padded, tf.expand_dims(x_padded[..., x_padded.shape[-2] -2, :], axis=-2)], axis=-2)
+ if pad_size[2] == self.input_size[1]:
+ x_padded = tf.concat([tf.expand_dims(x_padded[..., :, 1], axis=-1), x_padded, tf.expand_dims(x_padded[..., :, x_padded.shape[-1]-2], axis=-1)], axis=-1)
+
+ return x_padded
class TensorFlowBackend2D(TensorFlowBackend):
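With the padding changes above in place, the reproduction from the issue report should run; a minimal check mirroring it:

```python
import numpy as np
from kymatio.tensorflow import Scattering2D

# Before the fix, tf.pad(mode="REFLECT") rejected a pad equal to the spatial
# size (2**J == 32); the patch pads one pixel less and re-appends the edge
# rows/columns, so this should now complete without InvalidArgumentError.
scattering = Scattering2D(J=5, shape=(32, 32))
test_im = np.ones((1, 1, 32, 32))
out = scattering.scattering(test_im)
print(out.shape)
```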
|
{"golden_diff": "diff --git a/kymatio/scattering2d/backend/tensorflow_backend.py b/kymatio/scattering2d/backend/tensorflow_backend.py\n--- a/kymatio/scattering2d/backend/tensorflow_backend.py\n+++ b/kymatio/scattering2d/backend/tensorflow_backend.py\n@@ -16,11 +16,34 @@\n size of the original signal\n \"\"\"\n self.pad_size = pad_size\n+ self.input_size = input_size\n \n def __call__(self, x):\n+ pad_size = list(self.pad_size)\n+\n+ # Clone to avoid passing on modifications.\n+ new_pad_size = list(pad_size)\n+\n+ # This handles the case where the padding is equal to the image size.\n+ if pad_size[0] == self.input_size[0]:\n+ new_pad_size[0] -= 1\n+ new_pad_size[1] -= 1\n+ if pad_size[2] == self.input_size[1]:\n+ new_pad_size[2] -= 1\n+ new_pad_size[3] -= 1\n+\n paddings = [[0, 0]] * len(x.shape[:-2])\n- paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]\n- return tf.pad(x, paddings, mode=\"REFLECT\")\n+ paddings += [[new_pad_size[0], new_pad_size[1]], [new_pad_size[2], new_pad_size[3]]]\n+\n+ x_padded = tf.pad(x, paddings, mode=\"REFLECT\")\n+\n+ # Again, special handling for when padding is the same as image size.\n+ if pad_size[0] == self.input_size[0]:\n+ x_padded = tf.concat([tf.expand_dims(x_padded[..., 1, :], axis=-2), x_padded, tf.expand_dims(x_padded[..., x_padded.shape[-2] -2, :], axis=-2)], axis=-2)\n+ if pad_size[2] == self.input_size[1]:\n+ x_padded = tf.concat([tf.expand_dims(x_padded[..., :, 1], axis=-1), x_padded, tf.expand_dims(x_padded[..., :, x_padded.shape[-1]-2], axis=-1)], axis=-1)\n+\n+ return x_padded\n \n \n class TensorFlowBackend2D(TensorFlowBackend):\n", "issue": "Scattering 2D doesn't work when using 2^J == image size (TensorFlow backend)\nSimilar issues have been reported before (#284, #363) and also fixed (#412) for Torch backend. 
However, still not working in the tensorflow backend:\r\n\r\ntest:\r\n```python \r\nimport numpy as np\r\nfrom kymatio.tensorflow import Scattering2D\r\n\r\nscattering = Scattering2D(J=5, shape=(32, 32))\r\ntest_im = np.ones((1,1,32,32))\r\ntest = scattering.scattering(test_im)\r\n```\r\n\r\nGives error:\r\n```\r\n 7 scattering = Scattering2D(J=5, shape=(32, 32))\r\n 8 test_im = np.ones((1,1,32,32))\r\n----> 9 test = scattering.scattering(test_im)\r\n\r\n~/.local/lib/python3.9/site-packages/kymatio/scattering2d/frontend/tensorflow_frontend.py in scattering(self, input)\r\n 48 input = tf.reshape(input, tf.concat(((-1,), signal_shape), 0))\r\n 49 \r\n---> 50 S = scattering2d(input, self.pad, self.unpad, self.backend, self.J, self.L, self.phi, self.psi,\r\n 51 self.max_order, self.out_type)\r\n 52 \r\n\r\n~/.local/lib/python3.9/site-packages/kymatio/scattering2d/core/scattering2d.py in scattering2d(x, pad, unpad, backend, J, L, phi, psi, max_order, out_type)\r\n 13 out_S_0, out_S_1, out_S_2 = [], [], []\r\n 14 \r\n---> 15 U_r = pad(x)\r\n 16 \r\n 17 U_0_c = fft(U_r, 'C2C')\r\n\r\n~/.local/lib/python3.9/site-packages/kymatio/scattering2d/backend/tensorflow_backend.py in __call__(self, x)\r\n 27 paddings = [[0, 0]] * len(x.shape[:-2])\r\n 28 paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]\r\n---> 29 return tf.cast(tf.pad(x, paddings, mode=\"REFLECT\"), tf.complex64)\r\n 30 \r\n 31 def unpad(in_):\r\n\r\n~/.local/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)\r\n 151 except Exception as e:\r\n 152 filtered_tb = _process_traceback_frames(e.__traceback__)\r\n--> 153 raise e.with_traceback(filtered_tb) from None\r\n 154 finally:\r\n 155 del filtered_tb\r\n\r\n~/.local/lib/python3.9/site-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)\r\n 56 try:\r\n 57 ctx.ensure_initialized()\r\n---> 58 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\r\n 59 inputs, attrs, num_outputs)\r\n 60 except core._NotOkStatusException as e:\r\n\r\nInvalidArgumentError: paddings must be less than the dimension size: 32, 32 not less than 32 [Op:MirrorPad]\r\n```\r\n\r\n(speculation) So possibly problems with the order of the padding being different in tensorflow from torch. 
\r\n\r\nShould also probably include some tests for these types of problems like the tests for implemented for Torch in #346 \n", "before_files": [{"content": "import tensorflow as tf\n\nfrom ...backend.tensorflow_backend import TensorFlowBackend\n\n\nclass Pad(object):\n def __init__(self, pad_size, input_size):\n \"\"\"\n Padding which allows to simultaneously pad in a reflection fashion\n and map to complex.\n Parameters\n ----------\n pad_size : list of 4 integers\n size of padding to apply.\n input_size : list of 2 integers\n size of the original signal\n \"\"\"\n self.pad_size = pad_size\n\n def __call__(self, x):\n paddings = [[0, 0]] * len(x.shape[:-2])\n paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]\n return tf.pad(x, paddings, mode=\"REFLECT\")\n\n\nclass TensorFlowBackend2D(TensorFlowBackend):\n Pad = Pad\n\n @staticmethod\n def unpad(in_):\n \"\"\"\n Slices the input tensor at indices between 1::-1\n Parameters\n ----------\n in_ : tensor_like\n input tensor\n Returns\n -------\n in_[..., 1:-1, 1:-1]\n \"\"\"\n return in_[..., 1:-1, 1:-1]\n\n\n @classmethod\n def rfft(cls, x):\n cls.real_check(x)\n return tf.signal.fft2d(tf.cast(x, tf.complex64), name='rfft2d')\n\n @classmethod\n def irfft(cls, x):\n cls.complex_check(x)\n return tf.math.real(tf.signal.ifft2d(x, name='irfft2d'))\n\n\n @classmethod\n def ifft(cls, x):\n cls.complex_check(x)\n return tf.signal.ifft2d(x, name='ifft2d')\n\n @classmethod\n def subsample_fourier(cls, x, k):\n \"\"\" Subsampling of a 2D image performed in the Fourier domain.\n\n Subsampling in the spatial domain amounts to periodization\n in the Fourier domain, hence the formula.\n\n Parameters\n ----------\n x : tensor_like\n input tensor with at least three dimensions.\n k : int\n integer such that x is subsampled by k along the spatial variables.\n\n Returns\n -------\n out : tensor_like\n Tensor such that its Fourier transform is the Fourier\n transform of a subsampled version of x, i.e. in\n F^{-1}(out)[u1, u2] = F^{-1}(x)[u1 * k, u2 * k]\n\n \"\"\"\n cls.complex_check(x)\n\n y = tf.reshape(x, (-1, k, x.shape[1] // k, k, x.shape[2] // k))\n\n out = tf.reduce_mean(y, axis=(1, 3))\n return out\n\n\nbackend = TensorFlowBackend2D\n", "path": "kymatio/scattering2d/backend/tensorflow_backend.py"}]}
| 2,174 | 531 |
gh_patches_debug_17618
|
rasdani/github-patches
|
git_diff
|
CiviWiki__OpenCiviWiki-28
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix CSRF middleware
There is a [TODO in `settings.py` to 'fix the CSRF middleware'](https://github.com/CiviWiki/OpenCiviWiki/blob/dev/civiwiki/settings.py#L60). This issue is a placeholder to make sure we resolve the issue with CSRF middleware.
What is the issue that prevents us from using CSRF?
</issue>
<code>
[start of civiwiki/settings.py]
1 """
2 Django settings for civiwiki project.
3 Darius Calliet May 12, 2016
4
5 Production settings file to select proper environment variables.
6 """
7 import os
8
9 from django.core.exceptions import ImproperlyConfigured
10 import dj_database_url
11
12
13 def get_env_variable(environment_variable, optional=False):
14 """Get the environment variable or return exception"""
15 try:
16 return os.environ[environment_variable]
17 except KeyError:
18 if optional:
19 return ''
20 else:
21 error = "Environment variable '{ev}' not found.".format(ev=environment_variable)
22 raise ImproperlyConfigured(error)
23
24
25 # Devlopment Environment Control
26 DEBUG = 'DEBUG' in os.environ
27
28 if 'DJANGO_HOST' in os.environ:
29 DJANGO_HOST = get_env_variable("DJANGO_HOST")
30 else:
31 DJANGO_HOST = 'LOCALHOST'
32
33
34 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
35 SECRET_KEY = get_env_variable("DJANGO_SECRET_KEY")
36 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost"]
37
38
39 INSTALLED_APPS = (
40 'django.contrib.admin',
41 'django.contrib.auth',
42 'django.contrib.contenttypes',
43 'django.contrib.sessions',
44 'django.contrib.messages',
45 'django.contrib.staticfiles',
46 'storages',
47 'channels',
48 'civiwiki',
49 'api',
50 'authentication',
51 'frontend_views',
52 'notifications',
53 'legislation',
54 )
55
56
57 MIDDLEWARE_CLASSES = (
58 'django.contrib.sessions.middleware.SessionMiddleware',
59 'django.middleware.common.CommonMiddleware',
60 # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually
61 'django.contrib.auth.middleware.AuthenticationMiddleware',
62 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
63 'django.contrib.messages.middleware.MessageMiddleware',
64 'django.middleware.clickjacking.XFrameOptionsMiddleware',
65 'django.middleware.security.SecurityMiddleware',
66 )
67
68
69 CORS_ORIGIN_ALLOW_ALL = True
70 ROOT_URLCONF = 'civiwiki.urls'
71 LOGIN_URL = '/login'
72
73
74 # SSL Setup
75 if DJANGO_HOST is not 'LOCALHOST':
76 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
77 SECURE_SSL_REDIRECT = True
78 SESSION_COOKIE_SECURE = True
79 CSRF_COOKIE_SECURE = True
80
81
82 # Internationalization & Localization
83 LANGUAGE_CODE = 'en-us'
84 TIME_ZONE = 'UTC'
85 USE_I18N = True
86 USE_L10N = True
87 USE_TZ = True
88
89
90 TEMPLATES = [
91 {
92 'BACKEND': 'django.template.backends.django.DjangoTemplates',
93 'DIRS': [os.path.join(BASE_DIR, "webapp/templates")], #TODO: Add non-webapp template directory
94 'APP_DIRS': True,
95 'OPTIONS': {
96 'context_processors': [
97 'django.template.context_processors.debug',
98 'django.template.context_processors.request',
99 'django.contrib.auth.context_processors.auth',
100 'django.contrib.messages.context_processors.messages',
101 ],
102 },
103 },
104 ]
105
106
107 WSGI_APPLICATION = 'civiwiki.wsgi.application'
108
109
110 # Apex Contact for Production Errors
111 ADMINS = [('Development Team', '[email protected]')]
112
113
114 # API keys
115 SUNLIGHT_API_KEY = get_env_variable("SUNLIGHT_API_KEY")
116 GOOGLE_API_KEY = get_env_variable("GOOGLE_MAP_API_KEY")
117
118 # Channels Setup
119 if 'REDIS_URL' in os.environ:
120 REDIS_URL = get_env_variable("REDIS_URL")
121 else:
122 REDIS_URL = 'redis://localhost:6379'
123 CHANNEL_LAYERS = {
124 "default": {
125 "BACKEND": "asgi_redis.RedisChannelLayer",
126 "CONFIG": {
127 "hosts": [REDIS_URL],
128 },
129 "ROUTING": "civiwiki.routing.channel_routing",
130 },
131 }
132
133
134 # Celery Task Runner Setup
135 CELERY_BROKER_URL = REDIS_URL + '/0'
136 CELERY_RESULT_BACKEND = CELERY_BROKER_URL
137 CELERY_ACCEPT_CONTENT = ['application/json']
138 CELERY_TASK_SERIALIZER = 'json'
139 CELERY_RESULT_SERIALIZER = 'json'
140 CELERY_TIME_ZONE = TIME_ZONE
141
142
143 # AWS S3 Setup
144 if 'AWS_STORAGE_BUCKET_NAME' not in os.environ:
145 MEDIA_URL = '/media/'
146 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
147 else:
148 AWS_STORAGE_BUCKET_NAME = get_env_variable("AWS_STORAGE_BUCKET_NAME")
149 AWS_S3_ACCESS_KEY_ID = get_env_variable("AWS_S3_ACCESS_KEY_ID")
150 AWS_S3_SECRET_ACCESS_KEY = get_env_variable("AWS_S3_SECRET_ACCESS_KEY")
151 DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
152 AWS_S3_SECURE_URLS = False
153 AWS_QUERYSTRING_AUTH = False
154
155 STATIC_URL = '/static/'
156 STATICFILES_DIRS = (
157 os.path.join(BASE_DIR, 'webapp/static'),
158 )
159 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
160
161
162 # Database
163 if 'CIVIWIKI_LOCAL_NAME' not in os.environ:
164 STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
165
166 DATABASES = {
167 'default': dj_database_url.parse(get_env_variable("DATABASE_URL"))
168 }
169 else:
170 DATABASES = {
171 'default': {
172 'HOST': 'localhost',
173 'PORT': '5432',
174 'NAME': get_env_variable("CIVIWIKI_LOCAL_NAME"),
175 'ENGINE': 'django.db.backends.postgresql_psycopg2',
176 'USER': get_env_variable("CIVIWIKI_LOCAL_USERNAME"),
177 'PASSWORD': get_env_variable("CIVIWIKI_LOCAL_PASSWORD"),
178 },
179 }
180
181
182 # Email Backend Setup
183 if 'EMAIL_HOST' not in os.environ:
184 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
185 EMAIL_HOST_USER = "[email protected]"
186 else:
187 EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
188 EMAIL_HOST = get_env_variable("EMAIL_HOST")
189 EMAIL_PORT = get_env_variable("EMAIL_PORT")
190 EMAIL_HOST_USER = get_env_variable("EMAIL_HOST_USER")
191 EMAIL_HOST_PASSWORD = get_env_variable("EMAIL_HOST_PASSWORD")
192 EMAIL_USE_SSL = True
193 DEFAULT_FROM_EMAIL = EMAIL_HOST
194
195
196 # Notification API Settings
197 NOTIFICATIONS_SOFT_DELETE = True
198 NOTIFICATIONS_USE_JSONFIELD = True
199
[end of civiwiki/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/civiwiki/settings.py b/civiwiki/settings.py
--- a/civiwiki/settings.py
+++ b/civiwiki/settings.py
@@ -57,7 +57,7 @@
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
- # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually
+ 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
@@ -66,6 +66,8 @@
)
+CSRF_USE_SESSIONS = True # Store the CSRF token in the users session instead of in a cookie
+
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'civiwiki.urls'
LOGIN_URL = '/login'
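Two side effects of this change are worth flagging: with `CSRF_USE_SESSIONS = True` the token is no longer exposed as a `csrftoken` cookie, so front-end JavaScript must read it from the rendered form (`{% csrf_token %}`) instead; and unsafe requests without a token will now be rejected unless a view is explicitly exempted. A hypothetical check from inside the project's test suite (the `/login` path comes from `LOGIN_URL` above):

```python
from django.test import Client

# Sketch: with CsrfViewMiddleware active, a POST without a CSRF token is
# expected to come back as 403 when checks are enforced by the test client.
client = Client(enforce_csrf_checks=True)
response = client.post("/login", {"username": "alice", "password": "secret"})
print(response.status_code)  # expected: 403
```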
|
{"golden_diff": "diff --git a/civiwiki/settings.py b/civiwiki/settings.py\n--- a/civiwiki/settings.py\n+++ b/civiwiki/settings.py\n@@ -57,7 +57,7 @@\n MIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n- # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually\n+ 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n@@ -66,6 +66,8 @@\n )\n \n \n+CSRF_USE_SESSIONS = True # Store the CSRF token in the users session instead of in a cookie\n+\n CORS_ORIGIN_ALLOW_ALL = True\n ROOT_URLCONF = 'civiwiki.urls'\n LOGIN_URL = '/login'\n", "issue": " Fix CSRF middleware\nThere is a [TODO in `settings.py` to 'fix the CSRF middleware'](https://github.com/CiviWiki/OpenCiviWiki/blob/dev/civiwiki/settings.py#L60). This issue is a placeholder to make sure we resolve the issue with CSRF middleware.\r\n\r\nWhat is the issue that prevents us from using CSRF?\n", "before_files": [{"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\nfrom django.core.exceptions import ImproperlyConfigured\nimport dj_database_url\n\n\ndef get_env_variable(environment_variable, optional=False):\n \"\"\"Get the environment variable or return exception\"\"\"\n try:\n return os.environ[environment_variable]\n except KeyError:\n if optional:\n return ''\n else:\n error = \"Environment variable '{ev}' not found.\".format(ev=environment_variable)\n raise ImproperlyConfigured(error)\n\n\n# Devlopment Environment Control\nDEBUG = 'DEBUG' in os.environ\n\nif 'DJANGO_HOST' in os.environ:\n DJANGO_HOST = get_env_variable(\"DJANGO_HOST\")\nelse:\n DJANGO_HOST = 'LOCALHOST'\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = get_env_variable(\"DJANGO_SECRET_KEY\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\"]\n\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'storages',\n 'channels',\n 'civiwiki',\n 'api',\n 'authentication',\n 'frontend_views',\n 'notifications',\n 'legislation',\n)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware', TODO: fix eventually\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = 'civiwiki.urls'\nLOGIN_URL = '/login'\n\n\n# SSL Setup\nif DJANGO_HOST is not 'LOCALHOST':\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n\n# Internationalization & Localization\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 
\"webapp/templates\")], #TODO: Add non-webapp template directory\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = 'civiwiki.wsgi.application'\n\n\n# Apex Contact for Production Errors\nADMINS = [('Development Team', '[email protected]')]\n\n\n# API keys\nSUNLIGHT_API_KEY = get_env_variable(\"SUNLIGHT_API_KEY\")\nGOOGLE_API_KEY = get_env_variable(\"GOOGLE_MAP_API_KEY\")\n\n# Channels Setup\nif 'REDIS_URL' in os.environ:\n REDIS_URL = get_env_variable(\"REDIS_URL\")\nelse:\n REDIS_URL = 'redis://localhost:6379'\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"asgi_redis.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [REDIS_URL],\n },\n \"ROUTING\": \"civiwiki.routing.channel_routing\",\n },\n}\n\n\n# Celery Task Runner Setup\nCELERY_BROKER_URL = REDIS_URL + '/0'\nCELERY_RESULT_BACKEND = CELERY_BROKER_URL\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIME_ZONE = TIME_ZONE\n\n\n# AWS S3 Setup\nif 'AWS_STORAGE_BUCKET_NAME' not in os.environ:\n MEDIA_URL = '/media/'\n MEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nelse:\n AWS_STORAGE_BUCKET_NAME = get_env_variable(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_ACCESS_KEY_ID = get_env_variable(\"AWS_S3_ACCESS_KEY_ID\")\n AWS_S3_SECRET_ACCESS_KEY = get_env_variable(\"AWS_S3_SECRET_ACCESS_KEY\")\n DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n AWS_S3_SECURE_URLS = False\n AWS_QUERYSTRING_AUTH = False\n\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'webapp/static'),\n)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\n\n# Database\nif 'CIVIWIKI_LOCAL_NAME' not in os.environ:\n STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'\n\n DATABASES = {\n 'default': dj_database_url.parse(get_env_variable(\"DATABASE_URL\"))\n }\nelse:\n DATABASES = {\n 'default': {\n 'HOST': 'localhost',\n 'PORT': '5432',\n 'NAME': get_env_variable(\"CIVIWIKI_LOCAL_NAME\"),\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'USER': get_env_variable(\"CIVIWIKI_LOCAL_USERNAME\"),\n 'PASSWORD': get_env_variable(\"CIVIWIKI_LOCAL_PASSWORD\"),\n },\n }\n\n\n# Email Backend Setup\nif 'EMAIL_HOST' not in os.environ:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n EMAIL_HOST = get_env_variable(\"EMAIL_HOST\")\n EMAIL_PORT = get_env_variable(\"EMAIL_PORT\")\n EMAIL_HOST_USER = get_env_variable(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = get_env_variable(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n", "path": "civiwiki/settings.py"}]}
| 2,459 | 186 |
gh_patches_debug_5859
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-11755
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GraphQL returns all objects if tag doesn't exist
### NetBox version
v3.4.2
### Python version
3.10
### Steps to Reproduce
1. query GraphQL for device/interface with a tag that doesn't exist
### Expected Behavior
Resulting list of devices/interfaces only contains devices matching the tags or none if no device exists with matching tags.
### Observed Behavior
All devices/interfaces are returned no matter what tag filter has been applied. Same seems to apply for all kinds of filters that can be defined in GraphQL. No errors are returned.
Example below
query:
`{
device_list(tag: "blablalba") {
id
name
tags {
name
}
}
interface_list(tag: "not-ipmi_exporter") {
id
device {
id
}
tags {
name
}
}
}
`
result:
`{
"data": {
"device_list": [
{
"id": "1",
"name": "device-A",
"tags": [
{
"name": "node_exporter"
}
]
},
{
"id": "2",
"name": "device-B",
"tags": [
{
"name": "node_exporter"
}
]
}
],
"interface_list": [
{
"id": "1",
"device": {
"id": "1"
},
"tags": [
{
"name": "ipmi_exporter"
}
]
},
{
"id": "2",
"device": {
"id": "2"
},
"tags": [
{
"name": "ipmi_exporter"
}
]
}
]
}
}`
</issue>
<code>
[start of netbox/netbox/graphql/fields.py]
1 from functools import partial
2
3 import graphene
4 from graphene_django import DjangoListField
5
6 from .utils import get_graphene_type
7
8 __all__ = (
9 'ObjectField',
10 'ObjectListField',
11 )
12
13
14 class ObjectField(graphene.Field):
15 """
16 Retrieve a single object, identified by its numeric ID.
17 """
18 def __init__(self, *args, **kwargs):
19
20 if 'id' not in kwargs:
21 kwargs['id'] = graphene.Int(required=True)
22
23 super().__init__(*args, **kwargs)
24
25 @staticmethod
26 def object_resolver(django_object_type, root, info, **args):
27 """
28 Return an object given its numeric ID.
29 """
30 manager = django_object_type._meta.model._default_manager
31 queryset = django_object_type.get_queryset(manager, info)
32
33 return queryset.get(**args)
34
35 def get_resolver(self, parent_resolver):
36 return partial(self.object_resolver, self._type)
37
38
39 class ObjectListField(DjangoListField):
40 """
41 Retrieve a list of objects, optionally filtered by one or more FilterSet filters.
42 """
43 def __init__(self, _type, *args, **kwargs):
44 filter_kwargs = {}
45
46 # Get FilterSet kwargs
47 filterset_class = getattr(_type._meta, 'filterset_class', None)
48 if filterset_class:
49 for filter_name, filter_field in filterset_class.get_filters().items():
50 field_type = get_graphene_type(type(filter_field))
51 filter_kwargs[filter_name] = graphene.Argument(field_type)
52
53 super().__init__(_type, args=filter_kwargs, *args, **kwargs)
54
55 @staticmethod
56 def list_resolver(django_object_type, resolver, default_manager, root, info, **args):
57 queryset = super(ObjectListField, ObjectListField).list_resolver(django_object_type, resolver, default_manager, root, info, **args)
58
59 # Instantiate and apply the FilterSet, if defined
60 filterset_class = django_object_type._meta.filterset_class
61 if filterset_class:
62 filterset = filterset_class(data=args, queryset=queryset, request=info.context)
63 return filterset.qs
64
65 return queryset
66
[end of netbox/netbox/graphql/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/netbox/graphql/fields.py b/netbox/netbox/graphql/fields.py
--- a/netbox/netbox/graphql/fields.py
+++ b/netbox/netbox/graphql/fields.py
@@ -60,6 +60,8 @@
filterset_class = django_object_type._meta.filterset_class
if filterset_class:
filterset = filterset_class(data=args, queryset=queryset, request=info.context)
+ if not filterset.is_valid():
+ return queryset.none()
return filterset.qs
return queryset
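The two added lines short-circuit to an empty queryset whenever the FilterSet reports validation errors; previously an unknown tag never made it into the applied filters, so the unfiltered queryset came back. A hypothetical smoke test against a running instance — host and token below are placeholders — would be:

```python
import requests

query = '{ device_list(tag: "blablalba") { id name } }'
resp = requests.post(
    "https://netbox.example.com/graphql/",  # placeholder URL
    json={"query": query},
    headers={"Authorization": "Token 0123456789abcdef0123456789abcdef"},  # placeholder token
)
print(resp.json())  # expected after the fix: {"data": {"device_list": []}}
```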
|
{"golden_diff": "diff --git a/netbox/netbox/graphql/fields.py b/netbox/netbox/graphql/fields.py\n--- a/netbox/netbox/graphql/fields.py\n+++ b/netbox/netbox/graphql/fields.py\n@@ -60,6 +60,8 @@\n filterset_class = django_object_type._meta.filterset_class\n if filterset_class:\n filterset = filterset_class(data=args, queryset=queryset, request=info.context)\n+ if not filterset.is_valid():\n+ return queryset.none()\n return filterset.qs\n \n return queryset\n", "issue": "GraphQL returns all objects if tag doesn't exist\n### NetBox version\n\nv3.4.2\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\n1. query GraphQL for device/interface with a tag that doesn't exist\n\n### Expected Behavior\n\nResulting list of devices/interfaces only contains devices matching the tags or none if no device exists with matching tags.\n\n### Observed Behavior\n\nAll devices/interfaces are returned no matter what tag filter has been applied. Same seems to apply for all kinds of filters that can be defined in GraphQL. No errors are returned.\r\n\r\nExample below\r\n\r\nquery:\r\n`{\r\n device_list(tag: \"blablalba\") {\r\n id\r\n name\r\n tags {\r\n name\r\n }\r\n }\r\n interface_list(tag: \"not-ipmi_exporter\") {\r\n id\r\n device {\r\n id\r\n }\r\n tags {\r\n name\r\n }\r\n }\r\n}\r\n`\r\n\r\nresult:\r\n`{\r\n \"data\": {\r\n \"device_list\": [\r\n {\r\n \"id\": \"1\",\r\n \"name\": \"device-A\",\r\n \"tags\": [\r\n {\r\n \"name\": \"node_exporter\"\r\n }\r\n ]\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"name\": \"device-B\",\r\n \"tags\": [\r\n {\r\n \"name\": \"node_exporter\"\r\n }\r\n ]\r\n }\r\n ],\r\n \"interface_list\": [\r\n {\r\n \"id\": \"1\",\r\n \"device\": {\r\n \"id\": \"1\"\r\n },\r\n \"tags\": [\r\n {\r\n \"name\": \"ipmi_exporter\"\r\n }\r\n ]\r\n },\r\n {\r\n \"id\": \"2\",\r\n \"device\": {\r\n \"id\": \"2\"\r\n },\r\n \"tags\": [\r\n {\r\n \"name\": \"ipmi_exporter\"\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n}`\n", "before_files": [{"content": "from functools import partial\n\nimport graphene\nfrom graphene_django import DjangoListField\n\nfrom .utils import get_graphene_type\n\n__all__ = (\n 'ObjectField',\n 'ObjectListField',\n)\n\n\nclass ObjectField(graphene.Field):\n \"\"\"\n Retrieve a single object, identified by its numeric ID.\n \"\"\"\n def __init__(self, *args, **kwargs):\n\n if 'id' not in kwargs:\n kwargs['id'] = graphene.Int(required=True)\n\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def object_resolver(django_object_type, root, info, **args):\n \"\"\"\n Return an object given its numeric ID.\n \"\"\"\n manager = django_object_type._meta.model._default_manager\n queryset = django_object_type.get_queryset(manager, info)\n\n return queryset.get(**args)\n\n def get_resolver(self, parent_resolver):\n return partial(self.object_resolver, self._type)\n\n\nclass ObjectListField(DjangoListField):\n \"\"\"\n Retrieve a list of objects, optionally filtered by one or more FilterSet filters.\n \"\"\"\n def __init__(self, _type, *args, **kwargs):\n filter_kwargs = {}\n\n # Get FilterSet kwargs\n filterset_class = getattr(_type._meta, 'filterset_class', None)\n if filterset_class:\n for filter_name, filter_field in filterset_class.get_filters().items():\n field_type = get_graphene_type(type(filter_field))\n filter_kwargs[filter_name] = graphene.Argument(field_type)\n\n super().__init__(_type, args=filter_kwargs, *args, **kwargs)\n\n @staticmethod\n def list_resolver(django_object_type, resolver, default_manager, root, info, **args):\n queryset = super(ObjectListField, 
ObjectListField).list_resolver(django_object_type, resolver, default_manager, root, info, **args)\n\n # Instantiate and apply the FilterSet, if defined\n filterset_class = django_object_type._meta.filterset_class\n if filterset_class:\n filterset = filterset_class(data=args, queryset=queryset, request=info.context)\n return filterset.qs\n\n return queryset\n", "path": "netbox/netbox/graphql/fields.py"}]}
| 1,524 | 121 |
gh_patches_debug_2601
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-2126
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Community Roles: Date range validation
Currently, an end date earlier than start date is allowed.
</issue>
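The report is terse, so for orientation: the form below collects `start` and `end` dates but never compares them. A typical guard — a sketch only, not necessarily how AMY resolved the issue — validates the pair during form cleaning:

```python
from datetime import date

from django.core.exceptions import ValidationError


def validate_date_range(start, end):
    """Reject ranges whose end date precedes their start date."""
    if start is not None and end is not None and end < start:
        raise ValidationError({"end": "End date must not be earlier than start date."})


# e.g. called from CommunityRoleForm.clean() with cleaned_data["start"]/["end"]:
# validate_date_range(date(2022, 1, 10), date(2022, 1, 1))  # raises ValidationError
```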
<code>
[start of amy/communityroles/forms.py]
1 from collections import defaultdict
2 from typing import Any, Optional
3
4 from django import forms
5 from django.core.exceptions import ObjectDoesNotExist, ValidationError
6
7 from workshops.fields import HeavySelect2Widget, ModelSelect2Widget
8 from workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin
9
10 from .models import CommunityRole, CommunityRoleConfig
11
12
13 class CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):
14 class Meta:
15 model = CommunityRole
16 fields = (
17 "config",
18 "person",
19 "award",
20 "start",
21 "end",
22 "inactivation",
23 "membership",
24 "url",
25 "generic_relation_content_type",
26 "generic_relation_pk",
27 )
28 widgets = {
29 "config": HeavySelect2Widget(
30 data_view="api:communityroleconfig-list", attrs=SELECT2_SIDEBAR
31 ),
32 "person": ModelSelect2Widget(
33 data_view="person-lookup", attrs=SELECT2_SIDEBAR
34 ),
35 "award": ModelSelect2Widget(
36 data_view="award-lookup", attrs=SELECT2_SIDEBAR
37 ),
38 "membership": ModelSelect2Widget(
39 data_view="membership-lookup", attrs=SELECT2_SIDEBAR
40 ),
41 "generic_relation_content_type": forms.Select(
42 # "disabled" means the browsers will not send the field during POST.
43 # See how it's handled in `clean()` method below.
44 attrs={"disabled": ""},
45 ),
46 "generic_relation_pk": HeavySelect2Widget(
47 data_view="generic-object-lookup", attrs=SELECT2_SIDEBAR
48 ),
49 }
50 labels = {
51 "generic_relation_content_type": "Generic relation object type",
52 "generic_relation_pk": "Generic relation object",
53 }
54
55 class Media:
56 js = ("communityrole_form.js",)
57
58 def __init__(self, *args, **kwargs):
59 form_tag = kwargs.pop("form_tag", True)
60 super().__init__(*args, **kwargs)
61 bootstrap_kwargs = {
62 "add_cancel_button": False,
63 "form_tag": form_tag,
64 }
65 self.helper = BootstrapHelper(**bootstrap_kwargs)
66
67 def clean(self) -> dict[str, Any]:
68 """Validate form according to rules set up in related Community Role
69 configuration."""
70 cleaned_data = super().clean()
71 errors: defaultdict[str, list[ValidationError]] = defaultdict(list)
72 config: Optional[CommunityRoleConfig] = cleaned_data.get("config")
73
74 # Config is required, but field validation for 'config' should raise
75 # validation error first.
76 if not config:
77 return cleaned_data
78
79 # Award required?
80 if config.link_to_award and not cleaned_data.get("award"):
81 errors["award"].append(
82 ValidationError(f"Award is required with community role {config}")
83 )
84
85 # Specific award badge required?
86 if (badge := config.award_badge_limit) and (award := cleaned_data.get("award")):
87 if award.badge != badge:
88 errors["award"].append(
89 ValidationError(
90 f"Award badge must be {badge} for community role {config}"
91 )
92 )
93
94 # Membership required?
95 if config.link_to_membership and not cleaned_data.get("membership"):
96 errors["membership"].append(
97 ValidationError(f"Membership is required with community role {config}")
98 )
99
100 # Additional URL supported?
101 if not config.additional_url and cleaned_data.get("url"):
102 errors["url"].append(
103 ValidationError(f"URL is not supported for community role {config}")
104 )
105
106 # Widget for `generic_relation_content_type` is disabled in HTML, which
107 # makes browsers not send it. The code below sets the default value to
108 # the same value as in related config.
109 generic_relation_content_type = config.generic_relation_content_type
110
111 # Generic relation object must exist
112 if config.generic_relation_content_type and generic_relation_content_type:
113 model_class = generic_relation_content_type.model_class()
114 try:
115 model_class._base_manager.get(
116 pk=cleaned_data.get("generic_relation_pk")
117 )
118 except ObjectDoesNotExist:
119 errors["generic_relation_pk"].append(
120 ValidationError(
121 f"Generic relation object of model {model_class.__name__} "
122 "doesn't exist"
123 )
124 )
125
126 if errors:
127 raise ValidationError(errors)
128
129 return cleaned_data
130
[end of amy/communityroles/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py
--- a/amy/communityroles/forms.py
+++ b/amy/communityroles/forms.py
@@ -127,3 +127,11 @@
raise ValidationError(errors)
return cleaned_data
+
+ def clean_end(self):
+ """Validate that end >= start"""
+ start = self.cleaned_data.get("start")
+ end = self.cleaned_data.get("end")
+ if start and end and end < start:
+ raise ValidationError("Must not be earlier than start date.")
+ return end
|
{"golden_diff": "diff --git a/amy/communityroles/forms.py b/amy/communityroles/forms.py\n--- a/amy/communityroles/forms.py\n+++ b/amy/communityroles/forms.py\n@@ -127,3 +127,11 @@\n raise ValidationError(errors)\n \n return cleaned_data\n+\n+ def clean_end(self):\n+ \"\"\"Validate that end >= start\"\"\"\n+ start = self.cleaned_data.get(\"start\")\n+ end = self.cleaned_data.get(\"end\")\n+ if start and end and end < start:\n+ raise ValidationError(\"Must not be earlier than start date.\")\n+ return end\n", "issue": "Community Roles: Date range validation\nCurrently, an end date earlier than start date is allowed. \n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import Any, Optional\n\nfrom django import forms\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\n\nfrom workshops.fields import HeavySelect2Widget, ModelSelect2Widget\nfrom workshops.forms import SELECT2_SIDEBAR, BootstrapHelper, WidgetOverrideMixin\n\nfrom .models import CommunityRole, CommunityRoleConfig\n\n\nclass CommunityRoleForm(WidgetOverrideMixin, forms.ModelForm):\n class Meta:\n model = CommunityRole\n fields = (\n \"config\",\n \"person\",\n \"award\",\n \"start\",\n \"end\",\n \"inactivation\",\n \"membership\",\n \"url\",\n \"generic_relation_content_type\",\n \"generic_relation_pk\",\n )\n widgets = {\n \"config\": HeavySelect2Widget(\n data_view=\"api:communityroleconfig-list\", attrs=SELECT2_SIDEBAR\n ),\n \"person\": ModelSelect2Widget(\n data_view=\"person-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"award\": ModelSelect2Widget(\n data_view=\"award-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"membership\": ModelSelect2Widget(\n data_view=\"membership-lookup\", attrs=SELECT2_SIDEBAR\n ),\n \"generic_relation_content_type\": forms.Select(\n # \"disabled\" means the browsers will not send the field during POST.\n # See how it's handled in `clean()` method below.\n attrs={\"disabled\": \"\"},\n ),\n \"generic_relation_pk\": HeavySelect2Widget(\n data_view=\"generic-object-lookup\", attrs=SELECT2_SIDEBAR\n ),\n }\n labels = {\n \"generic_relation_content_type\": \"Generic relation object type\",\n \"generic_relation_pk\": \"Generic relation object\",\n }\n\n class Media:\n js = (\"communityrole_form.js\",)\n\n def __init__(self, *args, **kwargs):\n form_tag = kwargs.pop(\"form_tag\", True)\n super().__init__(*args, **kwargs)\n bootstrap_kwargs = {\n \"add_cancel_button\": False,\n \"form_tag\": form_tag,\n }\n self.helper = BootstrapHelper(**bootstrap_kwargs)\n\n def clean(self) -> dict[str, Any]:\n \"\"\"Validate form according to rules set up in related Community Role\n configuration.\"\"\"\n cleaned_data = super().clean()\n errors: defaultdict[str, list[ValidationError]] = defaultdict(list)\n config: Optional[CommunityRoleConfig] = cleaned_data.get(\"config\")\n\n # Config is required, but field validation for 'config' should raise\n # validation error first.\n if not config:\n return cleaned_data\n\n # Award required?\n if config.link_to_award and not cleaned_data.get(\"award\"):\n errors[\"award\"].append(\n ValidationError(f\"Award is required with community role {config}\")\n )\n\n # Specific award badge required?\n if (badge := config.award_badge_limit) and (award := cleaned_data.get(\"award\")):\n if award.badge != badge:\n errors[\"award\"].append(\n ValidationError(\n f\"Award badge must be {badge} for community role {config}\"\n )\n )\n\n # Membership required?\n if config.link_to_membership and not cleaned_data.get(\"membership\"):\n 
errors[\"membership\"].append(\n ValidationError(f\"Membership is required with community role {config}\")\n )\n\n # Additional URL supported?\n if not config.additional_url and cleaned_data.get(\"url\"):\n errors[\"url\"].append(\n ValidationError(f\"URL is not supported for community role {config}\")\n )\n\n # Widget for `generic_relation_content_type` is disabled in HTML, which\n # makes browsers not send it. The code below sets the default value to\n # the same value as in related config.\n generic_relation_content_type = config.generic_relation_content_type\n\n # Generic relation object must exist\n if config.generic_relation_content_type and generic_relation_content_type:\n model_class = generic_relation_content_type.model_class()\n try:\n model_class._base_manager.get(\n pk=cleaned_data.get(\"generic_relation_pk\")\n )\n except ObjectDoesNotExist:\n errors[\"generic_relation_pk\"].append(\n ValidationError(\n f\"Generic relation object of model {model_class.__name__} \"\n \"doesn't exist\"\n )\n )\n\n if errors:\n raise ValidationError(errors)\n\n return cleaned_data\n", "path": "amy/communityroles/forms.py"}]}
| 1,767 | 130 |
gh_patches_debug_12625
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-1530
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
License file in pypi archive
The pypi source archive isn't including the `LICENSE.txt` file. Would it be possible to add it? It is very helpful when packaging this for Linux distributions. The official python packaging guidelines also say [each package should have one](https://packaging.python.org/guides/distributing-packages-using-setuptools/#license-txt). Thank you!
Add keywords to setup.py
Testing with pyroma package showed keywords are missing from setup.py
```
willo@macbook ~/g/m/h/hypothesis-python> pyroma .
------------------------------
Checking .
Found hypothesis
------------------------------
Your package does not have keywords data.
------------------------------
Final rating: 9/10
Cottage Cheese
------------------------------
```
Discussed with @Zac-HD and keywords from the github repo below would be good to use.
python testing fuzzing property-based-testing
</issue>
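Both requests amount to small metadata changes. A hedged sketch of what they could look like in `setup.py` — the surrounding arguments are elided and the MANIFEST.in detail is an assumption:

```
import setuptools

setuptools.setup(
    name="hypothesis",
    # ... existing metadata and dependency arguments ...
    keywords="python testing fuzzing property-based-testing",
)

# Shipping LICENSE.txt in the sdist is usually handled outside setup(),
# e.g. with an `include LICENSE.txt` line in MANIFEST.in or, on newer
# setuptools, a `license_files` entry in setup.cfg.
```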
<code>
[start of hypothesis-python/setup.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22 import warnings
23
24 import setuptools
25
26
27 def local_file(name):
28 return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
29
30
31 SOURCE = local_file('src')
32 README = local_file('README.rst')
33
34 setuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))
35
36 if setuptools_version < (36, 2):
37 # Warning only - very bad if uploading bdist but fine if installing sdist.
38 warnings.warn(
39 'This version of setuptools is too old to correctly store '
40 'conditional dependencies in binary wheels. For more info, see: '
41 'https://hynek.me/articles/conditional-python-dependencies/'
42 )
43
44
45 # Assignment to placate pyflakes. The actual version is from the exec that
46 # follows.
47 __version__ = None
48
49 with open(local_file('src/hypothesis/version.py')) as o:
50 exec(o.read())
51
52 assert __version__ is not None
53
54
55 extras = {
56 'datetime': ['pytz'],
57 'pytz': ['pytz'],
58 'dateutil': ['python-dateutil'],
59 'fakefactory': ['Faker>=0.7'],
60 'numpy': ['numpy>=1.9.0'],
61 'pytest': ['pytest>=3.0'],
62 # We only support Django versions with upstream support - see
63 # https://www.djangoproject.com/download/#supported-versions
64 'django': ['pytz', 'django>=1.11'],
65 }
66
67 extras['faker'] = extras['fakefactory']
68 extras['all'] = sorted(sum(extras.values(), []))
69
70
71 install_requires = ['attrs>=16.0.0', 'coverage>=4.0']
72 # Using an environment marker on enum34 makes the dependency condition
73 # independent of the build environment, which is important for wheels.
74 # https://www.python.org/dev/peps/pep-0345/#environment-markers
75 if sys.version_info[0] < 3 and setuptools_version < (8, 0):
76 # Except really old systems, where we give up and install unconditionally
77 install_requires.append('enum34')
78 else:
79 install_requires.append('enum34; python_version=="2.7"')
80
81
82 setuptools.setup(
83 name='hypothesis',
84 version=__version__,
85 author='David R. MacIver',
86 author_email='[email protected]',
87 packages=setuptools.find_packages(SOURCE),
88 package_dir={'': SOURCE},
89 package_data={'hypothesis': ['py.typed']},
90 url=(
91 'https://github.com/HypothesisWorks/hypothesis/'
92 'tree/master/hypothesis-python'
93 ),
94 license='MPL v2',
95 description='A library for property based testing',
96 zip_safe=False,
97 extras_require=extras,
98 install_requires=install_requires,
99 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
100 classifiers=[
101 'Development Status :: 5 - Production/Stable',
102 'Intended Audience :: Developers',
103 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
104 'Operating System :: Unix',
105 'Operating System :: POSIX',
106 'Operating System :: Microsoft :: Windows',
107 'Programming Language :: Python',
108 'Programming Language :: Python :: 2.7',
109 'Programming Language :: Python :: 3',
110 'Programming Language :: Python :: 3.4',
111 'Programming Language :: Python :: 3.5',
112 'Programming Language :: Python :: 3.6',
113 'Programming Language :: Python :: 3.7',
114 'Programming Language :: Python :: Implementation :: CPython',
115 'Programming Language :: Python :: Implementation :: PyPy',
116 'Topic :: Software Development :: Testing',
117 'Framework :: Pytest',
118 ],
119 entry_points={
120 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
121 },
122 long_description=open(README).read(),
123 )
124
[end of hypothesis-python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py
--- a/hypothesis-python/setup.py
+++ b/hypothesis-python/setup.py
@@ -53,9 +53,9 @@
extras = {
- 'datetime': ['pytz'],
- 'pytz': ['pytz'],
- 'dateutil': ['python-dateutil'],
+ 'datetime': ['pytz>=2014.1'],
+ 'pytz': ['pytz>=2014.1'],
+ 'dateutil': ['python-dateutil>=1.4'],
'fakefactory': ['Faker>=0.7'],
'numpy': ['numpy>=1.9.0'],
'pytest': ['pytest>=3.0'],
@@ -120,4 +120,5 @@
'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
},
long_description=open(README).read(),
+ keywords='python testing fuzzing property-based-testing',
)
|
{"golden_diff": "diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py\n--- a/hypothesis-python/setup.py\n+++ b/hypothesis-python/setup.py\n@@ -53,9 +53,9 @@\n \n \n extras = {\n- 'datetime': ['pytz'],\n- 'pytz': ['pytz'],\n- 'dateutil': ['python-dateutil'],\n+ 'datetime': ['pytz>=2014.1'],\n+ 'pytz': ['pytz>=2014.1'],\n+ 'dateutil': ['python-dateutil>=1.4'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=3.0'],\n@@ -120,4 +120,5 @@\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n+ keywords='python testing fuzzing property-based-testing',\n )\n", "issue": "License file in pypi archive\nThe pypi source archive isn't including the `LICENSE.txt` file. Would it be possible to add it? It is very helpful when packaging this for Linux distributions. The official python packaging guidelines also say [each package should have one](https://packaging.python.org/guides/distributing-packages-using-setuptools/#license-txt). Thank you!\nAdd keywords to setup.py\nTesting with pyroma package showed keywords are missing from setup.py\r\n\r\n```\r\nwillo@macbook ~/g/m/h/hypothesis-python> pyroma .\r\n------------------------------\r\nChecking .\r\nFound hypothesis\r\n------------------------------\r\nYour package does not have keywords data.\r\n------------------------------\r\nFinal rating: 9/10\r\nCottage Cheese\r\n------------------------------\r\n```\r\nDiscussed with @Zac-HD and keywords from the github repo below would be good to use.\r\n\r\npython testing fuzzing property-based-testing\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport warnings\n\nimport setuptools\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file('src')\nREADME = local_file('README.rst')\n\nsetuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))\n\nif setuptools_version < (36, 2):\n # Warning only - very bad if uploading bdist but fine if installing sdist.\n warnings.warn(\n 'This version of setuptools is too old to correctly store '\n 'conditional dependencies in binary wheels. For more info, see: '\n 'https://hynek.me/articles/conditional-python-dependencies/'\n )\n\n\n# Assignment to placate pyflakes. 
The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file('src/hypothesis/version.py')) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n 'datetime': ['pytz'],\n 'pytz': ['pytz'],\n 'dateutil': ['python-dateutil'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=3.0'],\n # We only support Django versions with upstream support - see\n # https://www.djangoproject.com/download/#supported-versions\n 'django': ['pytz', 'django>=1.11'],\n}\n\nextras['faker'] = extras['fakefactory']\nextras['all'] = sorted(sum(extras.values(), []))\n\n\ninstall_requires = ['attrs>=16.0.0', 'coverage>=4.0']\n# Using an environment marker on enum34 makes the dependency condition\n# independent of the build environemnt, which is important for wheels.\n# https://www.python.org/dev/peps/pep-0345/#environment-markers\nif sys.version_info[0] < 3 and setuptools_version < (8, 0):\n # Except really old systems, where we give up and install unconditionally\n install_requires.append('enum34')\nelse:\n install_requires.append('enum34; python_version==\"2.7\"')\n\n\nsetuptools.setup(\n name='hypothesis',\n version=__version__,\n author='David R. MacIver',\n author_email='[email protected]',\n packages=setuptools.find_packages(SOURCE),\n package_dir={'': SOURCE},\n package_data={'hypothesis': ['py.typed']},\n url=(\n 'https://github.com/HypothesisWorks/hypothesis/'\n 'tree/master/hypothesis-python'\n ),\n license='MPL v2',\n description='A library for property based testing',\n zip_safe=False,\n extras_require=extras,\n install_requires=install_requires,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Testing',\n 'Framework :: Pytest',\n ],\n entry_points={\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n)\n", "path": "hypothesis-python/setup.py"}]}
| 2,041 | 223 |
gh_patches_debug_3972
|
rasdani/github-patches
|
git_diff
|
pytorch__audio-635
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
multiple definitions of `SoxEffect`.
`SoxEffect` is defined both in
https://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.h#L50-L54
and
https://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.cpp#L73-L77
-----
-----
`torch_sox.h` can be included into `torch_sox.cpp` so that `SoxEffect` needs to be defined
only once in the header.
</issue>
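Keeping the struct only in `torch_sox.h` mostly requires the build to expose that header to the extension. A rough sketch of the relevant part of `setup.py` — assuming the repository root is computed as `cwd`, as in the file below:

```
import os

from torch.utils.cpp_extension import CppExtension

cwd = os.path.dirname(os.path.abspath(__file__))  # repository root
include_dirs = []      # third-party include dirs, if any
libraries = ['sox']

ext_modules = [
    CppExtension(
        '_torch_sox',
        ['torchaudio/torch_sox.cpp'],
        # Adding the repo root lets torch_sox.cpp include torchaudio/torch_sox.h,
        # so SoxEffect can be defined once, in the header.
        include_dirs=include_dirs + [cwd],
        libraries=libraries,
    ),
]
```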
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import os
3 import platform
4 import sys
5 import subprocess
6
7 from setuptools import setup, find_packages
8 from torch.utils.cpp_extension import BuildExtension, CppExtension
9
10
11 def check_env_flag(name, default=''):
12 return os.getenv(name, default).upper() in set(['ON', '1', 'YES', 'TRUE', 'Y'])
13
14 DEBUG = check_env_flag('DEBUG')
15 IS_WHEEL = check_env_flag('IS_WHEEL')
16 IS_CONDA = check_env_flag('IS_CONDA')
17
18 print('DEBUG:', DEBUG, 'IS_WHEEL:', IS_WHEEL, 'IS_CONDA:', IS_CONDA)
19
20 eca = []
21 ela = []
22 if DEBUG:
23 if platform.system() == 'Windows':
24 ela += ['/DEBUG:FULL']
25 else:
26 eca += ['-O0', '-g']
27 ela += ['-O0', '-g']
28
29
30 libraries = []
31 include_dirs = []
32 extra_objects = []
33
34 # Hypothetically, the conda distribution could rely on an external sox,
35 # but the library is pretty small and it is not available on the default
36 # anaconda channel. So we statically link it in, just as we do with wheels.
37 if IS_WHEEL or IS_CONDA:
38 audio_path = os.path.dirname(os.path.abspath(__file__))
39
40 include_dirs += [os.path.join(audio_path, 'third_party/flac/include')]
41 include_dirs += [os.path.join(audio_path, 'third_party/lame/include')]
42 include_dirs += [os.path.join(audio_path, 'third_party/sox/include')]
43 include_dirs += [os.path.join(audio_path, 'third_party/mad/include')]
44
45 # proper link order (sox, mad, flac, lame)
46     # (the most important thing is that dependencies come after a library,
47 # e.g., sox comes first)
48 extra_objects += [os.path.join(audio_path, 'third_party/sox/lib/libsox.a')]
49 extra_objects += [os.path.join(audio_path, 'third_party/mad/lib/libmad.a')]
50 extra_objects += [os.path.join(audio_path, 'third_party/flac/lib/libFLAC.a')]
51 extra_objects += [os.path.join(audio_path, 'third_party/lame/lib/libmp3lame.a')]
52 else:
53 libraries += ['sox']
54
55
56 # Creating the version file
57 cwd = os.path.dirname(os.path.abspath(__file__))
58 version = '0.6.0a0'
59 sha = 'Unknown'
60
61 try:
62 sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
63 except Exception:
64 pass
65
66 if os.getenv('BUILD_VERSION'):
67 version = os.getenv('BUILD_VERSION')
68 elif sha != 'Unknown':
69 version += '+' + sha[:7]
70 print('-- Building version ' + version)
71
72 version_path = os.path.join(cwd, 'torchaudio', 'version.py')
73 with open(version_path, 'w') as f:
74 f.write("__version__ = '{}'\n".format(version))
75 f.write("git_version = {}\n".format(repr(sha)))
76
77 pytorch_package_version = os.getenv('PYTORCH_VERSION')
78
79 pytorch_package_dep = 'torch'
80 if pytorch_package_version is not None:
81 pytorch_package_dep += "==" + pytorch_package_version
82
83 if platform.system() == 'Windows':
84 ext_modules = None
85 else:
86 ext_modules = [
87 CppExtension(
88 '_torch_sox',
89 ['torchaudio/torch_sox.cpp'],
90 libraries=libraries,
91 include_dirs=include_dirs,
92 extra_compile_args=eca,
93 extra_objects=extra_objects,
94 extra_link_args=ela),
95 ]
96
97 setup(
98 name="torchaudio",
99 version=version,
100 description="An audio package for PyTorch",
101 url="https://github.com/pytorch/audio",
102 author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough",
103 author_email="[email protected]",
104 classifiers=[
105 "Environment :: Plugins",
106 "Intended Audience :: Developers",
107 "Intended Audience :: Science/Research",
108 "License :: OSI Approved :: BSD License",
109 "Operating System :: MacOS :: MacOS X",
110 "Operating System :: Microsoft :: Windows",
111 "Operating System :: POSIX",
112 "Programming Language :: C++",
113 "Programming Language :: Python :: 2.7",
114 "Programming Language :: Python :: 3",
115 "Programming Language :: Python :: Implementation :: CPython",
116 "Topic :: Multimedia :: Sound/Audio",
117 "Topic :: Scientific/Engineering :: Artificial Intelligence"
118 ],
119 # Exclude the build files.
120 packages=find_packages(exclude=["build"]),
121 ext_modules=ext_modules,
122 cmdclass={'build_ext': BuildExtension},
123 install_requires=[pytorch_package_dep]
124 )
125
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -88,7 +88,7 @@
'_torch_sox',
['torchaudio/torch_sox.cpp'],
libraries=libraries,
- include_dirs=include_dirs,
+ include_dirs=include_dirs + [cwd],
extra_compile_args=eca,
extra_objects=extra_objects,
extra_link_args=ela),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -88,7 +88,7 @@\n '_torch_sox',\n ['torchaudio/torch_sox.cpp'],\n libraries=libraries,\n- include_dirs=include_dirs,\n+ include_dirs=include_dirs + [cwd],\n extra_compile_args=eca,\n extra_objects=extra_objects,\n extra_link_args=ela),\n", "issue": "multiple definitions of `SoxEffect`.\n`SoxEffect` is defined both in\r\n\r\nhttps://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.h#L50-L54\r\n\r\nand\r\n\r\nhttps://github.com/pytorch/audio/blob/00d38203e401b8d9472a8f8394a10e2c309be02c/torchaudio/torch_sox.cpp#L73-L77\r\n\r\n\r\n-----\r\n\r\n-----\r\n\r\n`torch_sox.h` can be included into `torch_sox.cpp` so that `SoxEffect` needs to be defined\r\nonly once in the header.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport platform\nimport sys\nimport subprocess\n\nfrom setuptools import setup, find_packages\nfrom torch.utils.cpp_extension import BuildExtension, CppExtension\n\n\ndef check_env_flag(name, default=''):\n return os.getenv(name, default).upper() in set(['ON', '1', 'YES', 'TRUE', 'Y'])\n\nDEBUG = check_env_flag('DEBUG')\nIS_WHEEL = check_env_flag('IS_WHEEL')\nIS_CONDA = check_env_flag('IS_CONDA')\n\nprint('DEBUG:', DEBUG, 'IS_WHEEL:', IS_WHEEL, 'IS_CONDA:', IS_CONDA)\n\neca = []\nela = []\nif DEBUG:\n if platform.system() == 'Windows':\n ela += ['/DEBUG:FULL']\n else:\n eca += ['-O0', '-g']\n ela += ['-O0', '-g']\n\n\nlibraries = []\ninclude_dirs = []\nextra_objects = []\n\n# Hypothetically, the conda distribution could rely on an external sox,\n# but the library is pretty small and it is not available on the default\n# anaconda channel. So we statically link it in, just as we do with wheels.\nif IS_WHEEL or IS_CONDA:\n audio_path = os.path.dirname(os.path.abspath(__file__))\n\n include_dirs += [os.path.join(audio_path, 'third_party/flac/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/lame/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/sox/include')]\n include_dirs += [os.path.join(audio_path, 'third_party/mad/include')]\n\n # proper link order (sox, mad, flac, lame)\n # (the most important thing is that dependencies come after a libraryl\n # e.g., sox comes first)\n extra_objects += [os.path.join(audio_path, 'third_party/sox/lib/libsox.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/mad/lib/libmad.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/flac/lib/libFLAC.a')]\n extra_objects += [os.path.join(audio_path, 'third_party/lame/lib/libmp3lame.a')]\nelse:\n libraries += ['sox']\n\n\n# Creating the version file\ncwd = os.path.dirname(os.path.abspath(__file__))\nversion = '0.6.0a0'\nsha = 'Unknown'\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept Exception:\n pass\n\nif os.getenv('BUILD_VERSION'):\n version = os.getenv('BUILD_VERSION')\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint('-- Building version ' + version)\n\nversion_path = os.path.join(cwd, 'torchaudio', 'version.py')\nwith open(version_path, 'w') as f:\n f.write(\"__version__ = '{}'\\n\".format(version))\n f.write(\"git_version = {}\\n\".format(repr(sha)))\n\npytorch_package_version = os.getenv('PYTORCH_VERSION')\n\npytorch_package_dep = 'torch'\nif pytorch_package_version is not None:\n pytorch_package_dep += \"==\" + pytorch_package_version\n\nif platform.system() == 'Windows':\n ext_modules = None\nelse:\n ext_modules = [\n 
CppExtension(\n '_torch_sox',\n ['torchaudio/torch_sox.cpp'],\n libraries=libraries,\n include_dirs=include_dirs,\n extra_compile_args=eca,\n extra_objects=extra_objects,\n extra_link_args=ela),\n ]\n\nsetup(\n name=\"torchaudio\",\n version=version,\n description=\"An audio package for PyTorch\",\n url=\"https://github.com/pytorch/audio\",\n author=\"Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Environment :: Plugins\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\"\n ],\n # Exclude the build files.\n packages=find_packages(exclude=[\"build\"]),\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExtension},\n install_requires=[pytorch_package_dep]\n)\n", "path": "setup.py"}]}
| 2,014 | 94 |
gh_patches_debug_9080
|
rasdani/github-patches
|
git_diff
|
ARM-DOE__ACT-634
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Valid Min in act.utils.decode_present_weather
* ACT version: 1.3.4
* Python version:3.9.15
* Operating System: Windows 10
### Description
I am receiving an error at "del data.attrs['valid_min']" when reading in data and trying to decode present weather data.
### What I Did
```
act.utils.decode_present_weather(ds_fd70,'present_wx2',precip_codes)
decode_present_weather
del data.attrs['valid_min']
KeyError: 'valid_min'
When outputting time I have:
ds_fd70.time
array(['2023-03-09T16:45:00.000000000', '2023-03-09T17:00:00.000000000',
'2023-03-09T17:15:00.000000000', '2023-03-09T17:30:00.000000000',
'2023-03-09T17:45:00.000000000', '2023-03-09T18:00:00.000000000',
'2023-03-09T18:15:00.000000000', '2023-03-09T18:30:00.000000000',
'2023-03-09T18:45:00.000000000', '2023-03-09T19:00:00.000000000',
'2023-03-09T19:15:00.000000000', '2023-03-09T19:30:00.000000000',
```
</issue>
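The traceback boils down to deleting attributes that the variable never carried. A small defensive sketch of that cleanup step — the helper name is illustrative:

```
def scrub_range_attrs(data, attrs=("valid_min", "valid_max")):
    """Remove range attributes from an xarray.DataArray only when they exist."""
    for attr in attrs:
        if attr in data.attrs:
            del data.attrs[attr]
    return data
```

Guarding each `del` with an `in` check (or using `data.attrs.pop(attr, None)`) avoids the `KeyError` when a file simply does not define those attributes.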
<code>
[start of act/utils/inst_utils.py]
1 """
2 Functions containing utilities for instruments.
3
4 """
5
6
7 def decode_present_weather(ds, variable=None, decoded_name=None):
8 """
9     This function is to decode codes reported from automatic weather stations such as the PWD22.
10 This is based on WMO Table 4680.
11
12 Parameters
13 ----------
14 ds : xarray.Dataset
15 ACT or Xarray dataset from which to convert codes
16 variable : string
17 Variable to decode
18 decoded_name : string
19 New variable name to store updated labels
20
21 Returns
22 -------
23 ds : xarray.Dataset
24 Returns dataset with new decoded data
25
26 References
27 ----------
28 WMO Manual on Code Volume I.1
29 https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf
30
31 """
32
33 # Check to ensure that a variable name is passed
34 if variable is None:
35 raise ValueError('You must specify a variable')
36
37 if variable not in ds:
38 raise ValueError('Variable not in the dataset')
39
40 # Define the weather hash
41 weather = {
42 0: 'No significant weather observed',
43 1: 'Clouds generally dissolving or becoming less developed during the past hour',
44 2: 'State of the sky on the whole unchanged during the past hour',
45 3: 'Clouds generally forming or developing during the past hour',
46 4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',
47 5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',
48 10: 'Mist',
49 11: 'Diamond dust',
50 12: 'Distant lightning',
51 18: 'Squalls',
52 20: 'Fog',
53 21: 'Precipitation',
54 22: 'Drizzle (not freezing) or snow grains',
55 23: 'Rain (not freezing)',
56 24: 'Snow',
57 25: 'Freezing drizzle or freezing rain',
58 26: 'Thunderstorm (with or without precipitation)',
59 27: 'Blowing or drifting snow or sand',
60 28: 'Blowing or drifting snow or sand, visibility >= 1 km',
61 29: 'Blowing or drifting snow or sand, visibility < 1 km',
62 30: 'Fog',
63 31: 'Fog or ice fog in patches',
64 32: 'Fog or ice fog, has become thinner during the past hour',
65 33: 'Fog or ice fog, no appreciable change during the past hour',
66 34: 'Fog or ice fog, has begun or become thicker during the past hour',
67 35: 'Fog, depositing rime',
68 40: 'Precipitation',
69 41: 'Precipitation, slight or moderate',
70 42: 'Precipitation, heavy',
71 43: 'Liquid precipitation, slight or moderate',
72 44: 'Liquid precipitation, heavy',
73 45: 'Solid precipitation, slight or moderate',
74 46: 'Solid precipitation, heavy',
75 47: 'Freezing precipitation, slight or moderate',
76 48: 'Freezing precipitation, heavy',
77 50: 'Drizzle',
78 51: 'Drizzle, not freezing, slight',
79 52: 'Drizzle, not freezing, moderate',
80 53: 'Drizzle, not freezing, heavy',
81 54: 'Drizzle, freezing, slight',
82 55: 'Drizzle, freezing, moderate',
83 56: 'Drizzle, freezing, heavy',
84 57: 'Drizzle and rain, slight',
85 58: 'Drizzle and rain, moderate or heavy',
86 60: 'Rain',
87 61: 'Rain, not freezing, slight',
88 62: 'Rain, not freezing, moderate',
89 63: 'Rain, not freezing, heavy',
90 64: 'Rain, freezing, slight',
91 65: 'Rain, freezing, moderate',
92 66: 'Rain, freezing, heavy',
93 67: 'Rain (or drizzle) and snow, slight',
94 68: 'Rain (or drizzle) and snow, moderate or heavy',
95 70: 'Snow',
96 71: 'Snow, light',
97 72: 'Snow, moderate',
98 73: 'Snow, heavy',
99 74: 'Ice pellets, slight',
100 75: 'Ice pellets, moderate',
101 76: 'Ice pellets, heavy',
102 77: 'Snow grains',
103 78: 'Ice crystals',
104 80: 'Shower(s) or Intermittent Precipitation',
105 81: 'Rain shower(s) or intermittent rain, slight',
106 82: 'Rain shower(s) or intermittent rain, moderate',
107 83: 'Rain shower(s) or intermittent rain, heavy',
108 84: 'Rain shower(s) or intermittent rain, violent',
109 85: 'Snow shower(s) or intermittent snow, slight',
110 86: 'Snow shower(s) or intermittent snow, moderate',
111 87: 'Snow shower(s) or intermittent snow, heavy',
112 89: 'Hail',
113 90: 'Thunderstorm',
114 91: 'Thunderstorm, slight or moderate, with no precipitation',
115 92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',
116 93: 'Thunderstorm, slight or moderate, with hail',
117 94: 'Thunderstorm, heavy, with no precipitation',
118 95: 'Thunderstorm, heavy, with rain showers and/or snow showers',
119 96: 'Thunderstorm, heavy, with hail',
120 99: 'Tornado',
121 -9999: 'Missing',
122 }
123
124 # If a decoded name is not passed, make one
125 if decoded_name is None:
126 decoded_name = variable + '_decoded'
127
128 # Get data and fill nans with -9999
129 data = ds[variable]
130 data = data.fillna(-9999)
131
132 # Get the weather type for each code
133 wx_type = [weather[d] for d in data.values]
134
135 # Massage the data array to set back in the dataset
136 data.values = wx_type
137 data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
138 del data.attrs['valid_min']
139 del data.attrs['valid_max']
140
141 ds[decoded_name] = data
142
143 return ds
144
[end of act/utils/inst_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/act/utils/inst_utils.py b/act/utils/inst_utils.py
--- a/act/utils/inst_utils.py
+++ b/act/utils/inst_utils.py
@@ -134,9 +134,14 @@
# Massage the data array to set back in the dataset
data.values = wx_type
- data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
- del data.attrs['valid_min']
- del data.attrs['valid_max']
+ if 'long_name' in data.attrs:
+ data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'
+ else:
+ data.attrs['long_name'] = 'Decoded present weather values'
+ if 'valid_min' in data.attrs:
+ del data.attrs['valid_min']
+ if 'valid_max' in data.attrs:
+ del data.attrs['valid_max']
ds[decoded_name] = data
|
{"golden_diff": "diff --git a/act/utils/inst_utils.py b/act/utils/inst_utils.py\n--- a/act/utils/inst_utils.py\n+++ b/act/utils/inst_utils.py\n@@ -134,9 +134,14 @@\n \n # Massage the data array to set back in the dataset\n data.values = wx_type\n- data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n- del data.attrs['valid_min']\n- del data.attrs['valid_max']\n+ if 'long_name' in data.attrs:\n+ data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n+ else:\n+ data.attrs['long_name'] = 'Decoded present weather values'\n+ if 'valid_min' in data.attrs:\n+ del data.attrs['valid_min']\n+ if 'valid_max' in data.attrs:\n+ del data.attrs['valid_max']\n \n ds[decoded_name] = data\n", "issue": "Valid Min in act.utils.decode_present_weather\n* ACT version: 1.3.4\r\n* Python version:3.9.15\r\n* Operating System: Windows 10\r\n\r\n### Description\r\n\r\nI am receiving an error for \"del data.attrs['valid_min'] when reading in data trying to decode present weather data. \r\n\r\n### What I Did\r\n\r\n```\r\nact.utils.decode_present_weather(ds_fd70,'present_wx2',precip_codes)\r\ndecode_present_weather\r\n del data.attrs['valid_min']\r\n\r\nKeyError: 'valid_min'\r\n\r\nWhen outputting time I have:\r\nds_fd70.time\r\n\r\narray(['2023-03-09T16:45:00.000000000', '2023-03-09T17:00:00.000000000',\r\n '2023-03-09T17:15:00.000000000', '2023-03-09T17:30:00.000000000',\r\n '2023-03-09T17:45:00.000000000', '2023-03-09T18:00:00.000000000',\r\n '2023-03-09T18:15:00.000000000', '2023-03-09T18:30:00.000000000',\r\n '2023-03-09T18:45:00.000000000', '2023-03-09T19:00:00.000000000',\r\n '2023-03-09T19:15:00.000000000', '2023-03-09T19:30:00.000000000',\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nFunctions containing utilities for instruments.\n\n\"\"\"\n\n\ndef decode_present_weather(ds, variable=None, decoded_name=None):\n \"\"\"\n This function is to decode codes reported from automatic weather stations suchas the PWD22.\n This is based on WMO Table 4680.\n\n Parameters\n ----------\n ds : xarray.Dataset\n ACT or Xarray dataset from which to convert codes\n variable : string\n Variable to decode\n decoded_name : string\n New variable name to store updated labels\n\n Returns\n -------\n ds : xarray.Dataset\n Returns dataset with new decoded data\n\n References\n ----------\n WMO Manual on Code Volume I.1\n https://www.wmo.int/pages/prog/www/WMOCodes/WMO306_vI1/Publications/2017update/Sel9.pdf\n\n \"\"\"\n\n # Check to ensure that a variable name is passed\n if variable is None:\n raise ValueError('You must specify a variable')\n\n if variable not in ds:\n raise ValueError('Variable not in the dataset')\n\n # Define the weather hash\n weather = {\n 0: 'No significant weather observed',\n 1: 'Clouds generally dissolving or becoming less developed during the past hour',\n 2: 'State of the sky on the whole unchanged during the past hour',\n 3: 'Clouds generally forming or developing during the past hour',\n 4: 'Haze or smoke, or dust in suspension in the air, visibility >= 1 km',\n 5: 'Haze or smoke, or dust in suspension in the air, visibility < 1 km',\n 10: 'Mist',\n 11: 'Diamond dust',\n 12: 'Distant lightning',\n 18: 'Squalls',\n 20: 'Fog',\n 21: 'Precipitation',\n 22: 'Drizzle (not freezing) or snow grains',\n 23: 'Rain (not freezing)',\n 24: 'Snow',\n 25: 'Freezing drizzle or freezing rain',\n 26: 'Thunderstorm (with or without precipitation)',\n 27: 'Blowing or drifting snow or sand',\n 28: 'Blowing or drifting snow or sand, visibility >= 1 km',\n 29: 'Blowing or drifting snow or sand, 
visibility < 1 km',\n 30: 'Fog',\n 31: 'Fog or ice fog in patches',\n 32: 'Fog or ice fog, has become thinner during the past hour',\n 33: 'Fog or ice fog, no appreciable change during the past hour',\n 34: 'Fog or ice fog, has begun or become thicker during the past hour',\n 35: 'Fog, depositing rime',\n 40: 'Precipitation',\n 41: 'Precipitation, slight or moderate',\n 42: 'Precipitation, heavy',\n 43: 'Liquid precipitation, slight or moderate',\n 44: 'Liquid precipitation, heavy',\n 45: 'Solid precipitation, slight or moderate',\n 46: 'Solid precipitation, heavy',\n 47: 'Freezing precipitation, slight or moderate',\n 48: 'Freezing precipitation, heavy',\n 50: 'Drizzle',\n 51: 'Drizzle, not freezing, slight',\n 52: 'Drizzle, not freezing, moderate',\n 53: 'Drizzle, not freezing, heavy',\n 54: 'Drizzle, freezing, slight',\n 55: 'Drizzle, freezing, moderate',\n 56: 'Drizzle, freezing, heavy',\n 57: 'Drizzle and rain, slight',\n 58: 'Drizzle and rain, moderate or heavy',\n 60: 'Rain',\n 61: 'Rain, not freezing, slight',\n 62: 'Rain, not freezing, moderate',\n 63: 'Rain, not freezing, heavy',\n 64: 'Rain, freezing, slight',\n 65: 'Rain, freezing, moderate',\n 66: 'Rain, freezing, heavy',\n 67: 'Rain (or drizzle) and snow, slight',\n 68: 'Rain (or drizzle) and snow, moderate or heavy',\n 70: 'Snow',\n 71: 'Snow, light',\n 72: 'Snow, moderate',\n 73: 'Snow, heavy',\n 74: 'Ice pellets, slight',\n 75: 'Ice pellets, moderate',\n 76: 'Ice pellets, heavy',\n 77: 'Snow grains',\n 78: 'Ice crystals',\n 80: 'Shower(s) or Intermittent Precipitation',\n 81: 'Rain shower(s) or intermittent rain, slight',\n 82: 'Rain shower(s) or intermittent rain, moderate',\n 83: 'Rain shower(s) or intermittent rain, heavy',\n 84: 'Rain shower(s) or intermittent rain, violent',\n 85: 'Snow shower(s) or intermittent snow, slight',\n 86: 'Snow shower(s) or intermittent snow, moderate',\n 87: 'Snow shower(s) or intermittent snow, heavy',\n 89: 'Hail',\n 90: 'Thunderstorm',\n 91: 'Thunderstorm, slight or moderate, with no precipitation',\n 92: 'Thunderstorm, slight or moderate, with rain showers and/or snow showers',\n 93: 'Thunderstorm, slight or moderate, with hail',\n 94: 'Thunderstorm, heavy, with no precipitation',\n 95: 'Thunderstorm, heavy, with rain showers and/or snow showers',\n 96: 'Thunderstorm, heavy, with hail',\n 99: 'Tornado',\n -9999: 'Missing',\n }\n\n # If a decoded name is not passed, make one\n if decoded_name is None:\n decoded_name = variable + '_decoded'\n\n # Get data and fill nans with -9999\n data = ds[variable]\n data = data.fillna(-9999)\n\n # Get the weather type for each code\n wx_type = [weather[d] for d in data.values]\n\n # Massage the data array to set back in the dataset\n data.values = wx_type\n data.attrs['long_name'] = data.attrs['long_name'] + ' Decoded'\n del data.attrs['valid_min']\n del data.attrs['valid_max']\n\n ds[decoded_name] = data\n\n return ds\n", "path": "act/utils/inst_utils.py"}]}
| 2,869 | 215 |
gh_patches_debug_42912
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3537
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix symlinks not copied during export
- [x] Refer to the issue that supports this Pull Request: fixes #3258
- [x] If the issue has missing info, explain the purpose/use case/pain/need that covers this Pull Request.
- [x] I've read the [Contributing guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've followed the PEP8 style guides for Python code.
- [ ] I've opened another PR in the Conan docs repo to the ``develop`` branch, documenting this one. Also adding a description of the changes in the ``changelog.rst`` file. https://github.com/conan-io/docs
</issue>
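Much of the change comes down to normalising what a linked folder points to before recreating the link in the destination tree. A rough sketch of that normalisation — `src_link` and `src_root` are illustrative names:

```
import os

def normalized_link_target(src_link, src_root):
    """Return a relative link target, or None if the link escapes src_root."""
    # Ignore symlinks whose real target lives outside the tree being exported.
    real_target = os.path.realpath(src_link)
    if os.path.relpath(real_target, src_root).startswith(".."):
        return None
    target = os.readlink(src_link)
    # Absolute targets would break once the tree is relocated, so store them
    # relative to the directory that contains the link.
    if os.path.isabs(target):
        target = os.path.relpath(target, os.path.dirname(src_link))
    return target
```

The recreated link can then be written with `os.symlink(target, dst_link)` regardless of where the destination tree lives.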
<code>
[start of conans/client/file_copier.py]
1 import os
2 import fnmatch
3 import shutil
4 from collections import defaultdict
5
6 from conans.util.files import mkdir
7
8
9 def report_copied_files(copied, output):
10 ext_files = defaultdict(list)
11 for f in copied:
12 _, ext = os.path.splitext(f)
13 ext_files[ext].append(os.path.basename(f))
14
15 if not ext_files:
16 return False
17
18 for ext, files in ext_files.items():
19 files_str = (", ".join(files)) if len(files) < 5 else ""
20 file_or_files = "file" if len(files) == 1 else "files"
21 if not ext:
22 output.info("Copied %d %s: %s" % (len(files), file_or_files, files_str))
23 else:
24 output.info("Copied %d '%s' %s: %s" % (len(files), ext, file_or_files, files_str))
25 return True
26
27
28 class FileCopier(object):
29 """ main responsible of copying files from place to place:
30 package: build folder -> package folder
31 imports: package folder -> user folder
32 export: user folder -> store "export" folder
33 """
34 def __init__(self, root_source_folder, root_destination_folder, excluded=None):
35 """
36         Takes the base folders to copy resources src -> dst. These folder names
37 will not be used in the relative names while copying
38 param root_source_folder: The base folder to copy things from, typically the
39 store build folder
40         param root_destination_folder: The base folder to copy things to, typically the
41 store package folder
42 """
43 self._base_src = root_source_folder
44 self._base_dst = root_destination_folder
45 self._copied = []
46 self._excluded = [root_destination_folder]
47 if excluded:
48 self._excluded.append(excluded)
49
50 def report(self, output):
51 return report_copied_files(self._copied, output)
52
53 def __call__(self, pattern, dst="", src="", keep_path=True, links=False, symlinks=None,
54 excludes=None, ignore_case=False):
55 """
56 param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll
57 param dst: the destination local folder, wrt to current conanfile dir, to which
58 the files will be copied. Eg: "bin"
59 param src: the source folder in which those files will be searched. This folder
60 will be stripped from the dst name. Eg.: lib/Debug/x86
61 param keep_path: False if you want the relative paths to be maintained from
62 src to dst folders, or just drop. False is useful if you want
63 to collect e.g. many *.libs among many dirs into a single
64 lib dir
65 return: list of copied files
66 """
67 if symlinks is not None:
68 links = symlinks
69 # Check for ../ patterns and allow them
70 if pattern.startswith(".."):
71 rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))
72 base_src = os.path.dirname(rel_dir)
73 pattern = os.path.basename(rel_dir)
74 else:
75 base_src = self._base_src
76
77 src = os.path.join(base_src, src)
78 dst = os.path.join(self._base_dst, dst)
79
80 files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,
81 ignore_case)
82 copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
83 self._link_folders(src, dst, link_folders)
84 self._copied.extend(files_to_copy)
85 return copied_files
86
87 def _filter_files(self, src, pattern, links, excludes, ignore_case):
88
89 """ return a list of the files matching the patterns
90 The list will be relative path names wrt to the root src folder
91 """
92 filenames = []
93 linked_folders = []
94 for root, subfolders, files in os.walk(src, followlinks=True):
95 if root in self._excluded:
96 subfolders[:] = []
97 continue
98
99 if links and os.path.islink(root):
100 linked_folders.append(os.path.relpath(root, src))
101 subfolders[:] = []
102 continue
103 basename = os.path.basename(root)
104 # Skip git or svn subfolders
105 if basename in [".git", ".svn"]:
106 subfolders[:] = []
107 continue
108 if basename == "test_package": # DO NOT export test_package/build folder
109 try:
110 subfolders.remove("build")
111 except:
112 pass
113
114 relative_path = os.path.relpath(root, src)
115 for f in files:
116 relative_name = os.path.normpath(os.path.join(relative_path, f))
117 filenames.append(relative_name)
118
119 if ignore_case:
120 filenames = {f.lower(): f for f in filenames}
121 pattern = pattern.lower()
122
123 files_to_copy = fnmatch.filter(filenames, pattern)
124 if excludes:
125 if not isinstance(excludes, (tuple, list)):
126 excludes = (excludes, )
127 if ignore_case:
128 excludes = [e.lower() for e in excludes]
129 for exclude in excludes:
130 files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
131
132 if ignore_case:
133 files_to_copy = [filenames[f] for f in files_to_copy]
134
135 return files_to_copy, linked_folders
136
137 @staticmethod
138 def _link_folders(src, dst, linked_folders):
139 for linked_folder in linked_folders:
140 link = os.readlink(os.path.join(src, linked_folder))
141 dst_link = os.path.join(dst, linked_folder)
142 try:
143 # Remove the previous symlink
144 os.remove(dst_link)
145 except OSError:
146 pass
147 # link is a string relative to linked_folder
148 # e.g.: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
149 mkdir(os.path.dirname(dst_link))
150 os.symlink(link, dst_link)
151 # Remove empty links
152 for linked_folder in linked_folders:
153 dst_link = os.path.join(dst, linked_folder)
154 abs_path = os.path.realpath(dst_link)
155 if not os.path.exists(abs_path):
156 os.remove(dst_link)
157
158 @staticmethod
159 def _copy_files(files, src, dst, keep_path, symlinks):
160 """ executes a multiple file copy from [(src_file, dst_file), (..)]
161 managing symlinks if necessary
162 """
163 copied_files = []
164 for filename in files:
165 abs_src_name = os.path.join(src, filename)
166 filename = filename if keep_path else os.path.basename(filename)
167 abs_dst_name = os.path.normpath(os.path.join(dst, filename))
168 try:
169 os.makedirs(os.path.dirname(abs_dst_name))
170 except:
171 pass
172 if symlinks and os.path.islink(abs_src_name):
173 linkto = os.readlink(abs_src_name) # @UndefinedVariable
174 try:
175 os.remove(abs_dst_name)
176 except OSError:
177 pass
178 os.symlink(linkto, abs_dst_name) # @UndefinedVariable
179 else:
180 shutil.copy2(abs_src_name, abs_dst_name)
181 copied_files.append(abs_dst_name)
182 return copied_files
183
[end of conans/client/file_copier.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conans/client/file_copier.py b/conans/client/file_copier.py
--- a/conans/client/file_copier.py
+++ b/conans/client/file_copier.py
@@ -91,6 +91,14 @@
"""
filenames = []
linked_folders = []
+ if excludes:
+ if not isinstance(excludes, (tuple, list)):
+ excludes = (excludes, )
+ if ignore_case:
+ excludes = [e.lower() for e in excludes]
+ else:
+ excludes = []
+
for root, subfolders, files in os.walk(src, followlinks=True):
if root in self._excluded:
subfolders[:] = []
@@ -112,6 +120,11 @@
pass
relative_path = os.path.relpath(root, src)
+ for exclude in excludes:
+ if fnmatch.fnmatch(relative_path, exclude):
+ subfolders[:] = []
+ files = []
+ break
for f in files:
relative_name = os.path.normpath(os.path.join(relative_path, f))
filenames.append(relative_name)
@@ -121,13 +134,8 @@
pattern = pattern.lower()
files_to_copy = fnmatch.filter(filenames, pattern)
- if excludes:
- if not isinstance(excludes, (tuple, list)):
- excludes = (excludes, )
- if ignore_case:
- excludes = [e.lower() for e in excludes]
- for exclude in excludes:
- files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
+ for exclude in excludes:
+ files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
if ignore_case:
files_to_copy = [filenames[f] for f in files_to_copy]
@@ -136,8 +144,20 @@
@staticmethod
def _link_folders(src, dst, linked_folders):
+ created_links = []
for linked_folder in linked_folders:
- link = os.readlink(os.path.join(src, linked_folder))
+ src_link = os.path.join(src, linked_folder)
+ # Discard symlinks that go out of the src folder
+ abs_path = os.path.realpath(src_link)
+ relpath = os.path.relpath(abs_path, src)
+ if relpath.startswith("."):
+ continue
+
+ link = os.readlink(src_link)
+ # Absoluted path symlinks are a problem, convert it to relative
+ if os.path.isabs(link):
+ link = os.path.relpath(link, os.path.dirname(src_link))
+
dst_link = os.path.join(dst, linked_folder)
try:
# Remove the previous symlink
@@ -148,12 +168,19 @@
# e.g.: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
mkdir(os.path.dirname(dst_link))
os.symlink(link, dst_link)
+ created_links.append(dst_link)
# Remove empty links
- for linked_folder in linked_folders:
- dst_link = os.path.join(dst, linked_folder)
+ for dst_link in created_links:
abs_path = os.path.realpath(dst_link)
if not os.path.exists(abs_path):
+ base_path = os.path.dirname(dst_link)
os.remove(dst_link)
+ while base_path.startswith(dst):
+ try: # Take advantage that os.rmdir does not delete non-empty dirs
+ os.rmdir(base_path)
+ except OSError:
+ break # not empty
+ base_path = os.path.dirname(base_path)
@staticmethod
def _copy_files(files, src, dst, keep_path, symlinks):
|
{"golden_diff": "diff --git a/conans/client/file_copier.py b/conans/client/file_copier.py\n--- a/conans/client/file_copier.py\n+++ b/conans/client/file_copier.py\n@@ -91,6 +91,14 @@\n \"\"\"\n filenames = []\n linked_folders = []\n+ if excludes:\n+ if not isinstance(excludes, (tuple, list)):\n+ excludes = (excludes, )\n+ if ignore_case:\n+ excludes = [e.lower() for e in excludes]\n+ else:\n+ excludes = []\n+\n for root, subfolders, files in os.walk(src, followlinks=True):\n if root in self._excluded:\n subfolders[:] = []\n@@ -112,6 +120,11 @@\n pass\n \n relative_path = os.path.relpath(root, src)\n+ for exclude in excludes:\n+ if fnmatch.fnmatch(relative_path, exclude):\n+ subfolders[:] = []\n+ files = []\n+ break\n for f in files:\n relative_name = os.path.normpath(os.path.join(relative_path, f))\n filenames.append(relative_name)\n@@ -121,13 +134,8 @@\n pattern = pattern.lower()\n \n files_to_copy = fnmatch.filter(filenames, pattern)\n- if excludes:\n- if not isinstance(excludes, (tuple, list)):\n- excludes = (excludes, )\n- if ignore_case:\n- excludes = [e.lower() for e in excludes]\n- for exclude in excludes:\n- files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n+ for exclude in excludes:\n+ files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n \n if ignore_case:\n files_to_copy = [filenames[f] for f in files_to_copy]\n@@ -136,8 +144,20 @@\n \n @staticmethod\n def _link_folders(src, dst, linked_folders):\n+ created_links = []\n for linked_folder in linked_folders:\n- link = os.readlink(os.path.join(src, linked_folder))\n+ src_link = os.path.join(src, linked_folder)\n+ # Discard symlinks that go out of the src folder\n+ abs_path = os.path.realpath(src_link)\n+ relpath = os.path.relpath(abs_path, src)\n+ if relpath.startswith(\".\"):\n+ continue\n+ \n+ link = os.readlink(src_link)\n+ # Absoluted path symlinks are a problem, convert it to relative\n+ if os.path.isabs(link):\n+ link = os.path.relpath(link, os.path.dirname(src_link))\n+\n dst_link = os.path.join(dst, linked_folder)\n try:\n # Remove the previous symlink\n@@ -148,12 +168,19 @@\n # e.g.: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n mkdir(os.path.dirname(dst_link))\n os.symlink(link, dst_link)\n+ created_links.append(dst_link)\n # Remove empty links\n- for linked_folder in linked_folders:\n- dst_link = os.path.join(dst, linked_folder)\n+ for dst_link in created_links:\n abs_path = os.path.realpath(dst_link)\n if not os.path.exists(abs_path):\n+ base_path = os.path.dirname(dst_link)\n os.remove(dst_link)\n+ while base_path.startswith(dst):\n+ try: # Take advantage that os.rmdir does not delete non-empty dirs\n+ os.rmdir(base_path)\n+ except OSError:\n+ break # not empty\n+ base_path = os.path.dirname(base_path)\n \n @staticmethod\n def _copy_files(files, src, dst, keep_path, symlinks):\n", "issue": "Fix symlinks not copied during export\n- [x] Refer to the issue that supports this Pull Request: fixes #3258\r\n- [x] If the issue has missing info, explain the purpose/use case/pain/need that covers this Pull Request.\r\n- [x] I've read the [Contributing guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've followed the PEP8 style guides for Python code.\r\n- [ ] I've opened another PR in the Conan docs repo to the ``develop`` branch, documenting this one. Also adding a description of the changes in the ``changelog.rst`` file. 
https://github.com/conan-io/docs\r\n\n", "before_files": [{"content": "import os\nimport fnmatch\nimport shutil\nfrom collections import defaultdict\n\nfrom conans.util.files import mkdir\n\n\ndef report_copied_files(copied, output):\n ext_files = defaultdict(list)\n for f in copied:\n _, ext = os.path.splitext(f)\n ext_files[ext].append(os.path.basename(f))\n\n if not ext_files:\n return False\n\n for ext, files in ext_files.items():\n files_str = (\", \".join(files)) if len(files) < 5 else \"\"\n file_or_files = \"file\" if len(files) == 1 else \"files\"\n if not ext:\n output.info(\"Copied %d %s: %s\" % (len(files), file_or_files, files_str))\n else:\n output.info(\"Copied %d '%s' %s: %s\" % (len(files), ext, file_or_files, files_str))\n return True\n\n\nclass FileCopier(object):\n \"\"\" main responsible of copying files from place to place:\n package: build folder -> package folder\n imports: package folder -> user folder\n export: user folder -> store \"export\" folder\n \"\"\"\n def __init__(self, root_source_folder, root_destination_folder, excluded=None):\n \"\"\"\n Takes the base folders to copy resources src -> dst. These folders names\n will not be used in the relative names while copying\n param root_source_folder: The base folder to copy things from, typically the\n store build folder\n param root_destination_folder: The base folder to copy things to, typicall the\n store package folder\n \"\"\"\n self._base_src = root_source_folder\n self._base_dst = root_destination_folder\n self._copied = []\n self._excluded = [root_destination_folder]\n if excluded:\n self._excluded.append(excluded)\n\n def report(self, output):\n return report_copied_files(self._copied, output)\n\n def __call__(self, pattern, dst=\"\", src=\"\", keep_path=True, links=False, symlinks=None,\n excludes=None, ignore_case=False):\n \"\"\"\n param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll\n param dst: the destination local folder, wrt to current conanfile dir, to which\n the files will be copied. Eg: \"bin\"\n param src: the source folder in which those files will be searched. This folder\n will be stripped from the dst name. Eg.: lib/Debug/x86\n param keep_path: False if you want the relative paths to be maintained from\n src to dst folders, or just drop. False is useful if you want\n to collect e.g. 
many *.libs among many dirs into a single\n lib dir\n return: list of copied files\n \"\"\"\n if symlinks is not None:\n links = symlinks\n # Check for ../ patterns and allow them\n if pattern.startswith(\"..\"):\n rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))\n base_src = os.path.dirname(rel_dir)\n pattern = os.path.basename(rel_dir)\n else:\n base_src = self._base_src\n\n src = os.path.join(base_src, src)\n dst = os.path.join(self._base_dst, dst)\n\n files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,\n ignore_case)\n copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)\n self._link_folders(src, dst, link_folders)\n self._copied.extend(files_to_copy)\n return copied_files\n\n def _filter_files(self, src, pattern, links, excludes, ignore_case):\n\n \"\"\" return a list of the files matching the patterns\n The list will be relative path names wrt to the root src folder\n \"\"\"\n filenames = []\n linked_folders = []\n for root, subfolders, files in os.walk(src, followlinks=True):\n if root in self._excluded:\n subfolders[:] = []\n continue\n\n if links and os.path.islink(root):\n linked_folders.append(os.path.relpath(root, src))\n subfolders[:] = []\n continue\n basename = os.path.basename(root)\n # Skip git or svn subfolders\n if basename in [\".git\", \".svn\"]:\n subfolders[:] = []\n continue\n if basename == \"test_package\": # DO NOT export test_package/build folder\n try:\n subfolders.remove(\"build\")\n except:\n pass\n\n relative_path = os.path.relpath(root, src)\n for f in files:\n relative_name = os.path.normpath(os.path.join(relative_path, f))\n filenames.append(relative_name)\n\n if ignore_case:\n filenames = {f.lower(): f for f in filenames}\n pattern = pattern.lower()\n\n files_to_copy = fnmatch.filter(filenames, pattern)\n if excludes:\n if not isinstance(excludes, (tuple, list)):\n excludes = (excludes, )\n if ignore_case:\n excludes = [e.lower() for e in excludes]\n for exclude in excludes:\n files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n\n if ignore_case:\n files_to_copy = [filenames[f] for f in files_to_copy]\n\n return files_to_copy, linked_folders\n\n @staticmethod\n def _link_folders(src, dst, linked_folders):\n for linked_folder in linked_folders:\n link = os.readlink(os.path.join(src, linked_folder))\n dst_link = os.path.join(dst, linked_folder)\n try:\n # Remove the previous symlink\n os.remove(dst_link)\n except OSError:\n pass\n # link is a string relative to linked_folder\n # e.g.: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n mkdir(os.path.dirname(dst_link))\n os.symlink(link, dst_link)\n # Remove empty links\n for linked_folder in linked_folders:\n dst_link = os.path.join(dst, linked_folder)\n abs_path = os.path.realpath(dst_link)\n if not os.path.exists(abs_path):\n os.remove(dst_link)\n\n @staticmethod\n def _copy_files(files, src, dst, keep_path, symlinks):\n \"\"\" executes a multiple file copy from [(src_file, dst_file), (..)]\n managing symlinks if necessary\n \"\"\"\n copied_files = []\n for filename in files:\n abs_src_name = os.path.join(src, filename)\n filename = filename if keep_path else os.path.basename(filename)\n abs_dst_name = os.path.normpath(os.path.join(dst, filename))\n try:\n os.makedirs(os.path.dirname(abs_dst_name))\n except:\n pass\n if symlinks and os.path.islink(abs_src_name):\n linkto = os.readlink(abs_src_name) # @UndefinedVariable\n try:\n os.remove(abs_dst_name)\n except 
OSError:\n pass\n os.symlink(linkto, abs_dst_name) # @UndefinedVariable\n else:\n shutil.copy2(abs_src_name, abs_dst_name)\n copied_files.append(abs_dst_name)\n return copied_files\n", "path": "conans/client/file_copier.py"}]}
| 2,710 | 848 |
gh_patches_debug_18014
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1606
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tf1 plugin better stream
Hi,
Are you sure there isn't a better stream than 360p for TF1?
[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/tf1/direct
[cli][info] Available streams: 496k (worst), 234p_alt, 234p, 896k, 360p_alt, 360p (best)
I get far better quality in a web browser than with the best stream listed.
</issue>
<code>
[start of src/streamlink/plugins/tf1.py]
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http, useragents
6 from streamlink.stream import HDSStream
7 from streamlink.stream import HLSStream
8
9
10 class TF1(Plugin):
11 url_re = re.compile(r"https?://(?:www\.)?(?:tf1\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?")
12 embed_url = "http://www.wat.tv/embedframe/live{0}"
13 embed_re = re.compile(r"urlLive.*?:.*?\"(http.*?)\"", re.MULTILINE)
14 api_url = "http://www.wat.tv/get/{0}/591997"
15 swf_url = "http://www.wat.tv/images/v70/PlayerLite.swf"
16 hds_channel_remap = {"tf1": "androidliveconnect", "lci": "androidlivelci", "tfx" : "nt1live", "tf1-series-films" : "hd1live" }
17 hls_channel_remap = {"lci": "LCI", "tf1": "V4", "tfx" : "nt1", "tf1-series-films" : "hd1" }
18
19 @classmethod
20 def can_handle_url(cls, url):
21 return cls.url_re.match(url) is not None
22
23 def _get_hds_streams(self, channel):
24 channel = self.hds_channel_remap.get(channel, "{0}live".format(channel))
25 manifest_url = http.get(self.api_url.format(channel),
26 params={"getURL": 1},
27 headers={"User-Agent": useragents.FIREFOX}).text
28
29 for s in HDSStream.parse_manifest(self.session,
30 manifest_url,
31 pvswf=self.swf_url,
32 headers={"User-Agent": useragents.FIREFOX}).items():
33 yield s
34
35 def _get_hls_streams(self, channel):
36 channel = self.hls_channel_remap.get(channel, channel)
37 embed_url = self.embed_url.format(channel)
38 self.logger.debug("Found embed URL: {0}", embed_url)
39 # page needs to have a mobile user agent
40 embed_page = http.get(embed_url, headers={"User-Agent": useragents.ANDROID})
41
42 m = self.embed_re.search(embed_page.text)
43 if m:
44 hls_stream_url = m.group(1)
45
46 try:
47 for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
48 yield s
49 except Exception:
50 self.logger.error("Failed to load the HLS playlist for {0}", channel)
51
52 def _get_streams(self):
53 m = self.url_re.match(self.url)
54 if m:
55 channel = m.group(1) or m.group(2)
56 self.logger.debug("Found channel {0}", channel)
57 for s in self._get_hds_streams(channel):
58 yield s
59
60 for s in self._get_hls_streams(channel):
61 yield s
62
63
64 __plugin__ = TF1
65
[end of src/streamlink/plugins/tf1.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/tf1.py b/src/streamlink/plugins/tf1.py
--- a/src/streamlink/plugins/tf1.py
+++ b/src/streamlink/plugins/tf1.py
@@ -1,6 +1,7 @@
from __future__ import print_function
import re
+from streamlink.compat import urlparse, parse_qsl
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HDSStream
@@ -41,8 +42,9 @@
m = self.embed_re.search(embed_page.text)
if m:
- hls_stream_url = m.group(1)
-
+ o = urlparse(m.group(1))
+ prms = dict(parse_qsl(o.query))
+ hls_stream_url = "{0}://{1}{2}?hdnea={3}".format(o.scheme, o.netloc, o.path, prms["hdnea"])
try:
for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
yield s
|
{"golden_diff": "diff --git a/src/streamlink/plugins/tf1.py b/src/streamlink/plugins/tf1.py\n--- a/src/streamlink/plugins/tf1.py\n+++ b/src/streamlink/plugins/tf1.py\n@@ -1,6 +1,7 @@\n from __future__ import print_function\n import re\n \n+from streamlink.compat import urlparse, parse_qsl\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http, useragents\n from streamlink.stream import HDSStream\n@@ -41,8 +42,9 @@\n \n m = self.embed_re.search(embed_page.text)\n if m:\n- hls_stream_url = m.group(1)\n-\n+ o = urlparse(m.group(1))\n+ prms = dict(parse_qsl(o.query))\n+ hls_stream_url = \"{0}://{1}{2}?hdnea={3}\".format(o.scheme, o.netloc, o.path, prms[\"hdnea\"])\n try:\n for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n yield s\n", "issue": "tf1 plugin better stream\nHi,\r\n\r\nAre you sure there isn't better stream than 360p for TF1 ?\r\n[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/tf1/direct\r\n[cli][info] Available streams: 496k (worst), 234p_alt, 234p, 896k, 360p_alt, 360p (best)\r\n\r\nI have a far better quality with a web browser than best.\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, useragents\nfrom streamlink.stream import HDSStream\nfrom streamlink.stream import HLSStream\n\n\nclass TF1(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.)?(?:tf1\\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?\")\n embed_url = \"http://www.wat.tv/embedframe/live{0}\"\n embed_re = re.compile(r\"urlLive.*?:.*?\\\"(http.*?)\\\"\", re.MULTILINE)\n api_url = \"http://www.wat.tv/get/{0}/591997\"\n swf_url = \"http://www.wat.tv/images/v70/PlayerLite.swf\"\n hds_channel_remap = {\"tf1\": \"androidliveconnect\", \"lci\": \"androidlivelci\", \"tfx\" : \"nt1live\", \"tf1-series-films\" : \"hd1live\" }\n hls_channel_remap = {\"lci\": \"LCI\", \"tf1\": \"V4\", \"tfx\" : \"nt1\", \"tf1-series-films\" : \"hd1\" }\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_hds_streams(self, channel):\n channel = self.hds_channel_remap.get(channel, \"{0}live\".format(channel))\n manifest_url = http.get(self.api_url.format(channel),\n params={\"getURL\": 1},\n headers={\"User-Agent\": useragents.FIREFOX}).text\n\n for s in HDSStream.parse_manifest(self.session,\n manifest_url,\n pvswf=self.swf_url,\n headers={\"User-Agent\": useragents.FIREFOX}).items():\n yield s\n\n def _get_hls_streams(self, channel):\n channel = self.hls_channel_remap.get(channel, channel)\n embed_url = self.embed_url.format(channel)\n self.logger.debug(\"Found embed URL: {0}\", embed_url)\n # page needs to have a mobile user agent\n embed_page = http.get(embed_url, headers={\"User-Agent\": useragents.ANDROID})\n\n m = self.embed_re.search(embed_page.text)\n if m:\n hls_stream_url = m.group(1)\n\n try:\n for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n yield s\n except Exception:\n self.logger.error(\"Failed to load the HLS playlist for {0}\", channel)\n\n def _get_streams(self):\n m = self.url_re.match(self.url)\n if m:\n channel = m.group(1) or m.group(2)\n self.logger.debug(\"Found channel {0}\", channel)\n for s in self._get_hds_streams(channel):\n yield s\n\n for s in self._get_hls_streams(channel):\n yield s\n\n\n__plugin__ = TF1\n", "path": "src/streamlink/plugins/tf1.py"}]}
| 1,443 | 230 |
gh_patches_debug_1466
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-1813
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Red input fields on first page load
When loading some pages, input fields appear reddish, suggesting there were validation errors, even though no input has been submitted yet.

</issue>
<code>
[start of meinberlin/apps/users/fields.py]
1 import re
2
3 from django import forms
4 from django.core.exceptions import ValidationError
5 from django.core.validators import EmailValidator
6 from django.forms import widgets
7 from django.utils.translation import ugettext_lazy as _
8
9
10 class CommaSeparatedEmailField(forms.Field):
11 email_validator = EmailValidator(
12 message=_('Please enter correct email addresses, separated by '
13 'commas.')
14 )
15
16 widget = widgets.TextInput(attrs={
17 'placeholder': '[email protected], [email protected], '
18 '[email protected],…'
19 })
20
21 def to_python(self, value):
22 if not value:
23 return []
24
25 emails = []
26 for email in value.split(','):
27 email = email.strip()
28 self.email_validator(email)
29 emails.append(email)
30
31 return emails
32
33
34 class EmailFileField(forms.FileField):
35 """Extract emails from uploaded text files."""
36
37 widget = widgets.FileInput
38 # Find possible email strings. Emails may be quoted and separated by
39 # whitespaces, commas, semicolons or < and >.
40 email_regex = re.compile(r'[^\s;,"\'<]+@[^\s;,"\'>]+\.[a-z]{2,}')
41 email_validator = EmailValidator()
42
43 def clean(self, data, initial=None):
44 file = super().clean(data, initial)
45 return self._extract_emails(file)
46
47 def _extract_emails(self, file):
48 if not file:
49 return []
50
51 emails = []
52 for byteline in file:
53 # As it is difficult to guess the correct encoding of a file,
54 # email addresses are restricted to contain only ascii letters.
55 # This works for every encoding which is a superset of ascii like
56 # utf-8 and latin-1. Non ascii chars are simply ignored.
57 line = byteline.decode('ascii', 'ignore')
58 for match in self.email_regex.finditer(line):
59 email = match.group(0)
60 if self.is_valid_email(email):
61 emails.append(email)
62 return emails
63
64 def is_valid_email(self, email):
65 try:
66 self.email_validator(email)
67 return True
68 except ValidationError:
69 return False
70
[end of meinberlin/apps/users/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/meinberlin/apps/users/fields.py b/meinberlin/apps/users/fields.py
--- a/meinberlin/apps/users/fields.py
+++ b/meinberlin/apps/users/fields.py
@@ -14,8 +14,7 @@
)
widget = widgets.TextInput(attrs={
- 'placeholder': '[email protected], [email protected], '
- '[email protected],…'
+ 'placeholder': '[email protected], [email protected],…'
})
def to_python(self, value):
|
{"golden_diff": "diff --git a/meinberlin/apps/users/fields.py b/meinberlin/apps/users/fields.py\n--- a/meinberlin/apps/users/fields.py\n+++ b/meinberlin/apps/users/fields.py\n@@ -14,8 +14,7 @@\n )\n \n widget = widgets.TextInput(attrs={\n- 'placeholder': '[email protected], [email protected], '\n- '[email protected],\u2026'\n+ 'placeholder': '[email protected], [email protected],\u2026'\n })\n \n def to_python(self, value):\n", "issue": "Red input fields on first page load\nWhen loading some pages, input fields are redish, suggesting there where validation errors, but there was not input submit yet.\r\n\r\n\n", "before_files": [{"content": "import re\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator\nfrom django.forms import widgets\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CommaSeparatedEmailField(forms.Field):\n email_validator = EmailValidator(\n message=_('Please enter correct email addresses, separated by '\n 'commas.')\n )\n\n widget = widgets.TextInput(attrs={\n 'placeholder': '[email protected], [email protected], '\n '[email protected],\u2026'\n })\n\n def to_python(self, value):\n if not value:\n return []\n\n emails = []\n for email in value.split(','):\n email = email.strip()\n self.email_validator(email)\n emails.append(email)\n\n return emails\n\n\nclass EmailFileField(forms.FileField):\n \"\"\"Extract emails from uploaded text files.\"\"\"\n\n widget = widgets.FileInput\n # Find possible email strings. Emails may be quoted and separated by\n # whitespaces, commas, semicolons or < and >.\n email_regex = re.compile(r'[^\\s;,\"\\'<]+@[^\\s;,\"\\'>]+\\.[a-z]{2,}')\n email_validator = EmailValidator()\n\n def clean(self, data, initial=None):\n file = super().clean(data, initial)\n return self._extract_emails(file)\n\n def _extract_emails(self, file):\n if not file:\n return []\n\n emails = []\n for byteline in file:\n # As it is difficult to guess the correct encoding of a file,\n # email addresses are restricted to contain only ascii letters.\n # This works for every encoding which is a superset of ascii like\n # utf-8 and latin-1. Non ascii chars are simply ignored.\n line = byteline.decode('ascii', 'ignore')\n for match in self.email_regex.finditer(line):\n email = match.group(0)\n if self.is_valid_email(email):\n emails.append(email)\n return emails\n\n def is_valid_email(self, email):\n try:\n self.email_validator(email)\n return True\n except ValidationError:\n return False\n", "path": "meinberlin/apps/users/fields.py"}]}
| 1,244 | 122 |
gh_patches_debug_14599
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2939
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Integrate DRF Spectacular with Mathesar's backend codebase
## Why DRF Spectacular
- DRF Spectacular automatically generates comprehensive documentation for Mathesar's API based on the OpenAPI specification.
- It is compatible with the Django REST Framework that Mathesar uses.
## Proposed solution
- Add the "drf-spectacular" package to the project's dependencies.
- Configure SPECTACULAR_SETTINGS in Mathesar's settings file.
</issue>
<code>
[start of config/settings/common_settings.py]
1 """
2 Base settings to build other settings files upon.
3
4 Generated by 'django-admin startproject' using Django 3.1.7.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.1/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.1/ref/settings/
11 """
12
13 import os
14 from pathlib import Path
15
16 from decouple import Csv, config as decouple_config
17 from dj_database_url import parse as db_url
18
19
20 # We use a 'tuple' with pipes as delimiters as decople naively splits the global
21 # variables on commas when casting to Csv()
22 def pipe_delim(pipe_string):
23 # Remove opening and closing brackets
24 pipe_string = pipe_string[1:-1]
25 # Split on pipe delim
26 return pipe_string.split("|")
27
28
29 # Build paths inside the project like this: BASE_DIR / 'subdir'.
30 BASE_DIR = Path(__file__).resolve().parent.parent.parent
31
32 # Application definition
33
34 INSTALLED_APPS = [
35 "django.contrib.admin",
36 "django.contrib.auth",
37 "django.contrib.contenttypes",
38 "django.contrib.sessions",
39 "django.contrib.messages",
40 "whitenoise.runserver_nostatic",
41 "django.contrib.staticfiles",
42 "rest_framework",
43 "django_filters",
44 "django_property_filter",
45 "mathesar",
46 ]
47
48 MIDDLEWARE = [
49 "django.middleware.security.SecurityMiddleware",
50 "whitenoise.middleware.WhiteNoiseMiddleware",
51 "django.contrib.sessions.middleware.SessionMiddleware",
52 "django.middleware.common.CommonMiddleware",
53 "django.middleware.csrf.CsrfViewMiddleware",
54 "django.contrib.auth.middleware.AuthenticationMiddleware",
55 "django.contrib.messages.middleware.MessageMiddleware",
56 "django.middleware.clickjacking.XFrameOptionsMiddleware",
57 "mathesar.middleware.CursorClosedHandlerMiddleware",
58 "mathesar.middleware.PasswordChangeNeededMiddleware",
59 'django_userforeignkey.middleware.UserForeignKeyMiddleware',
60 'django_request_cache.middleware.RequestCacheMiddleware',
61 ]
62
63 ROOT_URLCONF = "config.urls"
64
65 TEMPLATES = [
66 {
67 "BACKEND": "django.template.backends.django.DjangoTemplates",
68 "DIRS": [],
69 "APP_DIRS": True,
70 "OPTIONS": {
71 "context_processors": [
72 "config.context_processors.frontend_settings",
73 "django.template.context_processors.debug",
74 "django.template.context_processors.request",
75 "django.contrib.auth.context_processors.auth",
76 "django.contrib.messages.context_processors.messages",
77 "mathesar.template_context_processors.base_template_extensions.script_extension_templates"
78 ],
79 },
80 },
81 ]
82
83 WSGI_APPLICATION = "config.wsgi.application"
84
85 # Database
86 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
87
88 # TODO: Add to documentation that database keys should not be than 128 characters.
89
90 # MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'
91 # See pipe_delim above for why we use pipes as delimiters
92 DATABASES = {
93 db_key: db_url(url_string)
94 for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))
95 }
96 DATABASES[decouple_config('DJANGO_DATABASE_KEY', default="default")] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)
97
98 for db_key, db_dict in DATABASES.items():
99 # Engine can be '.postgresql' or '.postgresql_psycopg2'
100 if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):
101 raise ValueError(
102 f"{db_key} is not a PostgreSQL database. "
103 f"{db_dict['ENGINE']} found for {db_key}'s engine."
104 )
105
106 # pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'
107 # and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']
108 TEST = decouple_config('TEST', default=False, cast=bool)
109 if TEST:
110 for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):
111 DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}
112
113
114 # SECURITY WARNING: keep the secret key used in production secret!
115 SECRET_KEY = decouple_config('SECRET_KEY')
116
117 # SECURITY WARNING: don't run with debug turned on in production!
118 DEBUG = decouple_config('DEBUG', default=False, cast=bool)
119
120 ALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())
121
122 # Password validation
123 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
124
125 AUTH_PASSWORD_VALIDATORS = [
126 {
127 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
134 },
135 {
136 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
137 },
138 ]
139
140 # Internationalization
141 # https://docs.djangoproject.com/en/3.1/topics/i18n/
142
143 LANGUAGE_CODE = "en-us"
144
145 TIME_ZONE = "UTC"
146
147 USE_I18N = True
148
149 USE_L10N = True
150
151 USE_TZ = True
152
153 # Static files (CSS, JavaScript, Images)
154 # https://docs.djangoproject.com/en/3.1/howto/static-files/
155 # https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/
156
157 STATIC_URL = "/static/"
158
159 # When running with DEBUG=False, the webserver needs to serve files from this location
160 # python manage.py collectstatic has to be run to collect all static files into this location
161 # The files need to served in brotli or gzip compressed format
162 STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
163
164 # Media files (uploaded by the user)
165
166 MEDIA_ROOT = os.path.join(BASE_DIR, '.media/')
167
168 MEDIA_URL = "/media/"
169
170 # Update Authentication classes, removed BasicAuthentication
171 # Defaults: https://www.django-rest-framework.org/api-guide/settings/
172 REST_FRAMEWORK = {
173 'DEFAULT_AUTHENTICATION_CLASSES': [
174 'rest_framework.authentication.TokenAuthentication',
175 'rest_framework.authentication.SessionAuthentication'
176 ],
177 'DEFAULT_PERMISSION_CLASSES': [
178 'rest_framework.permissions.IsAuthenticated',
179 ],
180 'DEFAULT_FILTER_BACKENDS': (
181 'django_filters.rest_framework.DjangoFilterBackend',
182 'rest_framework.filters.OrderingFilter',
183 ),
184 'TEST_REQUEST_DEFAULT_FORMAT': 'json',
185 'EXCEPTION_HANDLER':
186 'mathesar.exception_handlers.mathesar_exception_handler'
187 }
188 FRIENDLY_ERRORS = {
189 'FIELD_ERRORS': {
190 # By default drf-friendly-errors does contain error codes for ListSerializer type
191 'ListSerializer': {
192 'required': 2007,
193 'null': 2027,
194 'invalid_choice': 2083,
195 'not_a_list': 2123,
196 'empty': 2093
197 },
198 'PermittedPkRelatedField': {
199 'required': 2007,
200 'null': 2027,
201 'does_not_exist': 2151,
202 'incorrect_type': 2161
203 },
204 'PermittedSlugRelatedField': {
205 'required': 2007, 'invalid': 2002, 'null': 2027,
206 'does_not_exist': 2151, 'incorrect_type': 2161
207 },
208 },
209 'EXCEPTION_DICT': {
210 'Http404': 4005
211 }
212 }
213 # Mathesar settings
214 MATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')
215 MATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')
216 MATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')
217 MATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'
218 MATHESAR_UI_SOURCE_LOCATION = os.path.join(BASE_DIR, 'mathesar_ui/')
219 MATHESAR_CAPTURE_UNHANDLED_EXCEPTION = decouple_config('CAPTURE_UNHANDLED_EXCEPTION', default=False)
220 MATHESAR_STATIC_NON_CODE_FILES_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/non-code/')
221
222 # UI source files have to be served by Django in order for static assets to be included during dev mode
223 # https://vitejs.dev/guide/assets.html
224 # https://vitejs.dev/guide/backend-integration.html
225 STATICFILES_DIRS = [MATHESAR_UI_SOURCE_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION] if MATHESAR_MODE == 'DEVELOPMENT' else [MATHESAR_UI_BUILD_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION]
226 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
227
228 # Accounts
229 AUTH_USER_MODEL = 'mathesar.User'
230 LOGIN_URL = '/auth/login/'
231 LOGIN_REDIRECT_URL = '/'
232 LOGOUT_REDIRECT_URL = LOGIN_URL
233 DRF_ACCESS_POLICY = {
234 'reusable_conditions': ['mathesar.api.permission_conditions']
235 }
236 # List of Template names that contains additional script tags to be added to the base template
237 BASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES = []
238
[end of config/settings/common_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/config/settings/common_settings.py b/config/settings/common_settings.py
--- a/config/settings/common_settings.py
+++ b/config/settings/common_settings.py
@@ -42,6 +42,7 @@
"rest_framework",
"django_filters",
"django_property_filter",
+ "drf_spectacular",
"mathesar",
]
@@ -183,7 +184,15 @@
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
'EXCEPTION_HANDLER':
- 'mathesar.exception_handlers.mathesar_exception_handler'
+ 'mathesar.exception_handlers.mathesar_exception_handler',
+ 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema'
+}
+SPECTACULAR_SETTINGS = {
+ 'TITLE': 'Mathesar API',
+ 'DESCRIPTION': '',
+ 'VERSION': '1.0.0',
+ 'SERVE_INCLUDE_SCHEMA': False,
+ # OTHER SETTINGS
}
FRIENDLY_ERRORS = {
'FIELD_ERRORS': {
|
{"golden_diff": "diff --git a/config/settings/common_settings.py b/config/settings/common_settings.py\n--- a/config/settings/common_settings.py\n+++ b/config/settings/common_settings.py\n@@ -42,6 +42,7 @@\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n+ \"drf_spectacular\",\n \"mathesar\",\n ]\n \n@@ -183,7 +184,15 @@\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'EXCEPTION_HANDLER':\n- 'mathesar.exception_handlers.mathesar_exception_handler'\n+ 'mathesar.exception_handlers.mathesar_exception_handler',\n+ 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema'\n+}\n+SPECTACULAR_SETTINGS = {\n+ 'TITLE': 'Mathesar API',\n+ 'DESCRIPTION': '',\n+ 'VERSION': '1.0.0',\n+ 'SERVE_INCLUDE_SCHEMA': False,\n+ # OTHER SETTINGS\n }\n FRIENDLY_ERRORS = {\n 'FIELD_ERRORS': {\n", "issue": "Integrate DRF Spectacular with Mathesar's backend codebase\n## Why DRF Spectacular\r\n-DRF Spectacular automatically generates comprehensive documentation based on OpenAPI specification for Mathesar's API. \r\n-It is compatible with Mathesar's Django REST Framework\r\n## Proposed solution\r\n-Add the \"drf-spectacular\" package to the project's dependencies\r\n-Configure the SPECTACULAR_SETTINGS in the Mathesar's settings file \r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nBase settings to build other settings files upon.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"mathesar.middleware.CursorClosedHandlerMiddleware\",\n \"mathesar.middleware.PasswordChangeNeededMiddleware\",\n 'django_userforeignkey.middleware.UserForeignKeyMiddleware',\n 'django_request_cache.middleware.RequestCacheMiddleware',\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n 
\"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"mathesar.template_context_processors.base_template_extensions.script_extension_templates\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY', default=\"default\")] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. \"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nTEST = decouple_config('TEST', default=False, cast=bool)\nif TEST:\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n 
'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'EXCEPTION_HANDLER':\n 'mathesar.exception_handlers.mathesar_exception_handler'\n}\nFRIENDLY_ERRORS = {\n 'FIELD_ERRORS': {\n # By default drf-friendly-errors does contain error codes for ListSerializer type\n 'ListSerializer': {\n 'required': 2007,\n 'null': 2027,\n 'invalid_choice': 2083,\n 'not_a_list': 2123,\n 'empty': 2093\n },\n 'PermittedPkRelatedField': {\n 'required': 2007,\n 'null': 2027,\n 'does_not_exist': 2151,\n 'incorrect_type': 2161\n },\n 'PermittedSlugRelatedField': {\n 'required': 2007, 'invalid': 2002, 'null': 2027,\n 'does_not_exist': 2151, 'incorrect_type': 2161\n },\n },\n 'EXCEPTION_DICT': {\n 'Http404': 4005\n }\n}\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\nMATHESAR_UI_SOURCE_LOCATION = os.path.join(BASE_DIR, 'mathesar_ui/')\nMATHESAR_CAPTURE_UNHANDLED_EXCEPTION = decouple_config('CAPTURE_UNHANDLED_EXCEPTION', default=False)\nMATHESAR_STATIC_NON_CODE_FILES_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/non-code/')\n\n# UI source files have to be served by Django in order for static assets to be included during dev mode\n# https://vitejs.dev/guide/assets.html\n# https://vitejs.dev/guide/backend-integration.html\nSTATICFILES_DIRS = [MATHESAR_UI_SOURCE_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION] if MATHESAR_MODE == 'DEVELOPMENT' else [MATHESAR_UI_BUILD_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION]\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Accounts\nAUTH_USER_MODEL = 'mathesar.User'\nLOGIN_URL = '/auth/login/'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = LOGIN_URL\nDRF_ACCESS_POLICY = {\n 'reusable_conditions': ['mathesar.api.permission_conditions']\n}\n# List of Template names that contains additional script tags to be added to the base template\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES = []\n", "path": "config/settings/common_settings.py"}]}
| 3,230 | 215 |
gh_patches_debug_7117
|
rasdani/github-patches
|
git_diff
|
napari__napari-413
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Massive traceback when deleting a (markers) layer
## 🐛 Bug
Sorry, no time to investigate now, but here is the traceback I saw when deleting a markers layer (using the trash icon):
<details>
```pytb
In [15]: WARNING: Traceback (most recent call last):
File "/Users/jni/conda/envs/36/bin/ipython", line 11, in <module>
sys.exit(start_ipython())
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/__init__.py", line 125, in start_ipython
return launch_new_instance(argv=argv, **kwargs)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/ipapp.py", line 356, in start
self.shell.mainloop()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 480, in mainloop
self.interact()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 463, in interact
code = self.prompt_for_code()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 376, in prompt_for_code
pre_run=self.pre_prompt, reset_current_buffer=True)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/interface.py", line 415, in run
self.eventloop.run(self.input, self.create_eventloop_callbacks())
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/posix.py", line 102, in run
self._inputhook_context.call_inputhook(ready)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/inputhook.py", line 74, in call_inputhook
self.inputhook(self)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py", line 495, in inputhook
self._inputhook(context)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/pt_inputhooks/qt.py", line 35, in inputhook
event_loop.exec_()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py", line 182, in remove_selected
self.pop(i)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/list/_model.py", line 55, in pop
self.changed.removed(item=obj)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py", line 489, in __call__
self._invoke_callback(cb, event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py", line 504, in _invoke_callback
cb(event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py", line 29, in _remove
layer.viewer = None
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/model.py", line 162, in viewer
self._parent = parent
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/_visual_wrapper.py", line 82, in _parent
self._node.parent = parent
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/frozen.py", line 17, in __setattr__
object.__setattr__(self, key, value)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/scene/node.py", line 209, in parent
self._update_trsys(None)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/_vispy/scene/visuals.py", line 82, in _update_trsys
self.transforms.scene_transform = scene.node_transform(doc)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/transform_system.py", line 276, in scene_transform
self._scene_transform.transforms = tr
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py", line 96, in transforms
self.update()
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py", line 153, in update
self.changed(*args)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 455, in __call__
self._invoke_callback(cb, event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 471, in _invoke_callback
cb(event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py", line 212, in _subtr_changed
self.update(ev)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py", line 153, in update
self.changed(*args)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 455, in __call__
self._invoke_callback(cb, event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 475, in _invoke_callback
self, cb_event=(cb, event))
<< caught exception here: >>
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py", line 471, in _invoke_callback
cb(event)
File "/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py", line 281, in source_changed
new_tr = [tr[0]]
IndexError: list index out of range
ERROR: Invoking <bound method SimplifiedChainTransform.source_changed of <ChainTransform [<STTransform scale=[1. 1. 1. 1.] translate=[0. 0. 0. 0.] at 0x5071730616>,
<STTransform scale=[2.6181054e+00 2.6181054e+00 1.0000000e-06 1.0000000e+00] translate=[-558.83954 -403.84387 0. 0. ] at 0x5319047544>] at 0x12e27aba8>> for Event
```
</details>
## To Reproduce
Approximately: (not sure which bits are important, maybe only the final step matters)
- add a (3D) image
- add a second image (segmentation in this case)
- decrease opacity of second image
- add a markers layer (with the + button)
- set annotation mode
- pan and zoom while holding down space
- hit the delete button.
## Expected behavior
Console should stay nice and clean when layers are deleted. ;)
## Environment
- napari Version (e.g., 1.0): 0.0.6 (from PyPI)
- OS (e.g., Linux): macOS Mojave
- Python version: 3.6
- Any other relevant information:
</issue>
<code>
[start of napari/components/layerlist.py]
1 from ..layers import Layer
2
3 from ..util.naming import inc_name_count
4 from ..util.list import ListModel
5
6
7 def _add(event):
8 """When a layer is added, set its name and order."""
9 layers = event.source
10 layer = event.item
11 layer.name = layers._coerce_name(layer.name, layer)
12 layer._order = -len(layers)
13 layer.events.name.connect(lambda e: layers._update_name(e))
14 layers.unselect_all(ignore=layer)
15
16
17 def _remove(event):
18 """When a layer is removed, remove its viewer."""
19 layers = event.source
20 layer = event.item
21 layer._order = 0
22 layer._node.parent = None
23
24
25 def _reorder(event):
26 """When the list is reordered, propagate those changes to draw order."""
27 layers = event.source
28 for i in range(len(layers)):
29 layers[i]._order = -i
30
31
32 class LayerList(ListModel):
33 """List-like layer collection with built-in reordering and callback hooks.
34
35 Attributes
36 ----------
37 events : vispy.util.event.EmitterGroup
38 Event hooks:
39 * added(item, index): whenever an item is added
40 * removed(item): whenever an item is removed
41 * reordered(): whenever the list is reordered
42 """
43
44 def __init__(self):
45 super().__init__(
46 basetype=Layer, lookup={str: lambda q, e: q == e.name}
47 )
48
49 self.events.added.connect(_add)
50 self.events.removed.connect(_remove)
51 self.events.reordered.connect(_reorder)
52
53 def __newlike__(self, iterable):
54 return ListModel(self._basetype, iterable, self._lookup)
55
56 def _coerce_name(self, name, layer=None):
57 """Coerce a name into a unique equivalent.
58
59 Parameters
60 ----------
61 name : str
62 Original name.
63 layer : Layer, optional
64 Layer for which name is generated.
65
66 Returns
67 -------
68 new_name : str
69 Coerced, unique name.
70 """
71 for l in self:
72 if l is layer:
73 continue
74 if l.name == name:
75 name = inc_name_count(name)
76
77 return name
78
79 def _update_name(self, event):
80 """Coerce name of the layer in `event.layer`."""
81 layer = event.source
82 layer.name = self._coerce_name(layer.name, layer)
83
84 def move_selected(self, index, insert):
85 """Reorder list by moving the item at index and inserting it
86 at the insert index. If additional items are selected these will
87 get inserted at the insert index too. This allows for rearranging
88 the list based on dragging and dropping a selection of items, where
89 index is the index of the primary item being dragged, and insert is
90 the index of the drop location, and the selection indicates if
91 multiple items are being dragged. If the moved layer is not selected
92 select it.
93
94 Parameters
95 ----------
96 index : int
97 Index of primary item to be moved
98 insert : int
99 Index that item(s) will be inserted at
100 """
101 total = len(self)
102 indices = list(range(total))
103 if not self[index].selected:
104 self.unselect_all()
105 self[index].selected = True
106 selected = [i for i in range(total) if self[i].selected]
107
108 # remove all indices to be moved
109 for i in selected:
110 indices.remove(i)
111 # adjust offset based on selected indices to move
112 offset = sum([i < insert and i != index for i in selected])
113 # insert indices to be moved at correct start
114 for insert_idx, elem_idx in enumerate(selected, start=insert - offset):
115 indices.insert(insert_idx, elem_idx)
116 # reorder list
117 self[:] = self[tuple(indices)]
118
119 def unselect_all(self, ignore=None):
120 """Unselects all layers expect any specified in ignore.
121
122 Parameters
123 ----------
124 ignore : Layer | None
125 Layer that should not be unselected if specified.
126 """
127 for layer in self:
128 if layer.selected and layer != ignore:
129 layer.selected = False
130
131 def remove_selected(self):
132 """Removes selected items from list.
133 """
134 to_delete = []
135 for i in range(len(self)):
136 if self[i].selected:
137 to_delete.append(i)
138 to_delete.reverse()
139 for i in to_delete:
140 self.pop(i)
141
[end of napari/components/layerlist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/napari/components/layerlist.py b/napari/components/layerlist.py
--- a/napari/components/layerlist.py
+++ b/napari/components/layerlist.py
@@ -2,6 +2,7 @@
from ..util.naming import inc_name_count
from ..util.list import ListModel
+from vispy.visuals.transforms import ChainTransform
def _add(event):
@@ -19,6 +20,7 @@
layers = event.source
layer = event.item
layer._order = 0
+ layer._node.transforms = ChainTransform()
layer._node.parent = None
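For reference, the patched `_remove` handler assembled from the hunks above would read roughly as below. This is an illustrative sketch only: it assumes the `ChainTransform` import path used in the diff and the private `_node` / `_order` attributes already present in the listing, and it trims the unused `layers = event.source` line.

```python
from vispy.visuals.transforms import ChainTransform


def _remove(event):
    """When a layer is removed, remove its viewer."""
    layer = event.item
    layer._order = 0
    # Swap in a fresh, empty transform chain before re-parenting. Callbacks
    # registered on the old chain are then no longer fired against a node
    # that is being detached, which is what produced the IndexError in the
    # reported traceback.
    layer._node.transforms = ChainTransform()
    layer._node.parent = None
```

The key point is the ordering: the transform chain is replaced before `parent` is set to `None`, so the transform-system update triggered by re-parenting runs against the empty chain.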
|
{"golden_diff": "diff --git a/napari/components/layerlist.py b/napari/components/layerlist.py\n--- a/napari/components/layerlist.py\n+++ b/napari/components/layerlist.py\n@@ -2,6 +2,7 @@\n \n from ..util.naming import inc_name_count\n from ..util.list import ListModel\n+from vispy.visuals.transforms import ChainTransform\n \n \n def _add(event):\n@@ -19,6 +20,7 @@\n layers = event.source\n layer = event.item\n layer._order = 0\n+ layer._node.transforms = ChainTransform()\n layer._node.parent = None\n", "issue": "Massive traceback when deleting a (markers) layer\n## \ud83d\udc1b Bug\r\n\r\nSorry, no time to investigate now, but here is the traceback I saw when deleting a markers layer (using the trash icon):\r\n\r\n<details>\r\n\r\n```pytb\r\nIn [15]: WARNING: Traceback (most recent call last):\r\n File \"/Users/jni/conda/envs/36/bin/ipython\", line 11, in <module>\r\n sys.exit(start_ipython())\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/__init__.py\", line 125, in start_ipython\r\n return launch_new_instance(argv=argv, **kwargs)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\r\n app.start()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/ipapp.py\", line 356, in start\r\n self.shell.mainloop()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 480, in mainloop\r\n self.interact()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 463, in interact\r\n code = self.prompt_for_code()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 376, in prompt_for_code\r\n pre_run=self.pre_prompt, reset_current_buffer=True)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/interface.py\", line 415, in run\r\n self.eventloop.run(self.input, self.create_eventloop_callbacks())\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/posix.py\", line 102, in run\r\n self._inputhook_context.call_inputhook(ready)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/prompt_toolkit/eventloop/inputhook.py\", line 74, in call_inputhook\r\n self.inputhook(self)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/interactiveshell.py\", line 495, in inputhook\r\n self._inputhook(context)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/IPython/terminal/pt_inputhooks/qt.py\", line 35, in inputhook\r\n event_loop.exec_()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py\", line 182, in remove_selected\r\n self.pop(i)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/list/_model.py\", line 55, in pop\r\n self.changed.removed(item=obj)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py\", line 489, in __call__\r\n self._invoke_callback(cb, event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/util/event.py\", line 504, in _invoke_callback\r\n cb(event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/components/_layers_list/model.py\", line 29, in _remove\r\n layer.viewer = None\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/model.py\", line 162, in viewer\r\n self._parent = parent\r\n File 
\"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/layers/_base_layer/_visual_wrapper.py\", line 82, in _parent\r\n self._node.parent = parent\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/frozen.py\", line 17, in __setattr__\r\n object.__setattr__(self, key, value)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/scene/node.py\", line 209, in parent\r\n self._update_trsys(None)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/napari/_vispy/scene/visuals.py\", line 82, in _update_trsys\r\n self.transforms.scene_transform = scene.node_transform(doc)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/transform_system.py\", line 276, in scene_transform\r\n self._scene_transform.transforms = tr\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py\", line 96, in transforms\r\n self.update()\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py\", line 153, in update\r\n self.changed(*args)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 455, in __call__\r\n self._invoke_callback(cb, event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 471, in _invoke_callback\r\n cb(event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py\", line 212, in _subtr_changed\r\n self.update(ev)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/base_transform.py\", line 153, in update\r\n self.changed(*args)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 455, in __call__\r\n self._invoke_callback(cb, event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 475, in _invoke_callback\r\n self, cb_event=(cb, event))\r\n << caught exception here: >>\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/util/event.py\", line 471, in _invoke_callback\r\n cb(event)\r\n File \"/Users/jni/conda/envs/36/lib/python3.6/site-packages/vispy/visuals/transforms/chain.py\", line 281, in source_changed\r\n new_tr = [tr[0]]\r\nIndexError: list index out of range\r\nERROR: Invoking <bound method SimplifiedChainTransform.source_changed of <ChainTransform [<STTransform scale=[1. 1. 1. 1.] translate=[0. 0. 0. 0.] at 0x5071730616>,\r\n <STTransform scale=[2.6181054e+00 2.6181054e+00 1.0000000e-06 1.0000000e+00] translate=[-558.83954 -403.84387 0. 0. ] at 0x5319047544>] at 0x12e27aba8>> for Event\r\n```\r\n\r\n</details>\r\n\r\n## To Reproduce\r\n\r\nApproximately: (not sure which bits are important, maybe only the final step matters)\r\n- add a (3D) image\r\n- add a second image (segmentation in this case)\r\n- decrease opacity of second image\r\n- add a markers layer (with the + button)\r\n- set annotation mode\r\n- pan and zoom while holding down space\r\n- hit the delete button.\r\n\r\n## Expected behavior\r\n\r\nConsole should stay nice and clean when layers are deleted. 
;)\r\n\r\n## Environment\r\n\r\n - napari Version (e.g., 1.0): 0.0.6 (from PyPI)\r\n - OS (e.g., Linux): macOS Mojave\r\n - Python version: 3.6\r\n - Any other relevant information:\r\n\n", "before_files": [{"content": "from ..layers import Layer\n\nfrom ..util.naming import inc_name_count\nfrom ..util.list import ListModel\n\n\ndef _add(event):\n \"\"\"When a layer is added, set its name and order.\"\"\"\n layers = event.source\n layer = event.item\n layer.name = layers._coerce_name(layer.name, layer)\n layer._order = -len(layers)\n layer.events.name.connect(lambda e: layers._update_name(e))\n layers.unselect_all(ignore=layer)\n\n\ndef _remove(event):\n \"\"\"When a layer is removed, remove its viewer.\"\"\"\n layers = event.source\n layer = event.item\n layer._order = 0\n layer._node.parent = None\n\n\ndef _reorder(event):\n \"\"\"When the list is reordered, propagate those changes to draw order.\"\"\"\n layers = event.source\n for i in range(len(layers)):\n layers[i]._order = -i\n\n\nclass LayerList(ListModel):\n \"\"\"List-like layer collection with built-in reordering and callback hooks.\n\n Attributes\n ----------\n events : vispy.util.event.EmitterGroup\n Event hooks:\n * added(item, index): whenever an item is added\n * removed(item): whenever an item is removed\n * reordered(): whenever the list is reordered\n \"\"\"\n\n def __init__(self):\n super().__init__(\n basetype=Layer, lookup={str: lambda q, e: q == e.name}\n )\n\n self.events.added.connect(_add)\n self.events.removed.connect(_remove)\n self.events.reordered.connect(_reorder)\n\n def __newlike__(self, iterable):\n return ListModel(self._basetype, iterable, self._lookup)\n\n def _coerce_name(self, name, layer=None):\n \"\"\"Coerce a name into a unique equivalent.\n\n Parameters\n ----------\n name : str\n Original name.\n layer : Layer, optional\n Layer for which name is generated.\n\n Returns\n -------\n new_name : str\n Coerced, unique name.\n \"\"\"\n for l in self:\n if l is layer:\n continue\n if l.name == name:\n name = inc_name_count(name)\n\n return name\n\n def _update_name(self, event):\n \"\"\"Coerce name of the layer in `event.layer`.\"\"\"\n layer = event.source\n layer.name = self._coerce_name(layer.name, layer)\n\n def move_selected(self, index, insert):\n \"\"\"Reorder list by moving the item at index and inserting it\n at the insert index. If additional items are selected these will\n get inserted at the insert index too. This allows for rearranging\n the list based on dragging and dropping a selection of items, where\n index is the index of the primary item being dragged, and insert is\n the index of the drop location, and the selection indicates if\n multiple items are being dragged. 
If the moved layer is not selected\n select it.\n\n Parameters\n ----------\n index : int\n Index of primary item to be moved\n insert : int\n Index that item(s) will be inserted at\n \"\"\"\n total = len(self)\n indices = list(range(total))\n if not self[index].selected:\n self.unselect_all()\n self[index].selected = True\n selected = [i for i in range(total) if self[i].selected]\n\n # remove all indices to be moved\n for i in selected:\n indices.remove(i)\n # adjust offset based on selected indices to move\n offset = sum([i < insert and i != index for i in selected])\n # insert indices to be moved at correct start\n for insert_idx, elem_idx in enumerate(selected, start=insert - offset):\n indices.insert(insert_idx, elem_idx)\n # reorder list\n self[:] = self[tuple(indices)]\n\n def unselect_all(self, ignore=None):\n \"\"\"Unselects all layers expect any specified in ignore.\n\n Parameters\n ----------\n ignore : Layer | None\n Layer that should not be unselected if specified.\n \"\"\"\n for layer in self:\n if layer.selected and layer != ignore:\n layer.selected = False\n\n def remove_selected(self):\n \"\"\"Removes selected items from list.\n \"\"\"\n to_delete = []\n for i in range(len(self)):\n if self[i].selected:\n to_delete.append(i)\n to_delete.reverse()\n for i in to_delete:\n self.pop(i)\n", "path": "napari/components/layerlist.py"}]}
| 3,786 | 140 |
gh_patches_debug_7470
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-661
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Engineering: Back-end - Combine logic for Agency Index and Getting Started page
_As a MST Senior Rider, when I go to benefits.calitp.org, I should see the new Introduction page design and content._
- Change the app logic to the following -
1. When there is only 1 transit agency, go directly to the agency index page (/mst)
2. From the agency index page, go directly to the verifier selection page (/eligibility/)
3. When there is only 1 eligibility verifier, go directly to the only start page (Login.gov verifier)
- Write tests for this new logic
- Update sample data for this new logic (??) so we can adequately test this in Dev, Staging
## How it is now
```mermaid
graph LR
A[core:index] -->|redirect| B(core:agency_index)
B -->|user clicks button| C(eligiblity:index)
C -->|redirect| D(eligibility:start)
```
## After this is implemented
```mermaid
graph LR
A[core:index] -->|redirect| B(core:agency_index)
B -->|*redirect*| C(eligiblity:index)
C -->|redirect| D(eligibility:start)
```
</issue>
<code>
[start of benefits/core/views.py]
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.shortcuts import redirect
6 from django.template import loader
7 from django.template.response import TemplateResponse
8 from django.urls import reverse
9 from django.utils.translation import gettext as _
10
11 from . import models, session, viewmodels
12 from .middleware import pageview_decorator
13
14
15 def PageTemplateResponse(request, page_vm):
16 """Helper returns a TemplateResponse using the common page template."""
17 return TemplateResponse(request, "core/page.html", page_vm.context_dict())
18
19
20 def _index_content_title():
21 """Helper returns the content title for the common index page."""
22 return _("core.pages.index.content_title")
23
24
25 def _index_url():
26 """Helper computes the index url path."""
27 return reverse("core:index")
28
29
30 @pageview_decorator
31 def index(request):
32 """View handler for the main entry page."""
33 session.reset(request)
34
35 agencies = models.TransitAgency.all_active()
36
37 if len(agencies) == 1:
38 agency = agencies[0]
39 return redirect(agency.index_url)
40
41 # generate a button to the landing page for each active agency
42 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]
43 buttons[0].classes.append("mt-3")
44 buttons[0].label = _("core.pages.index.chooseprovider")
45
46 page = viewmodels.Page(
47 title=_("core.pages.index.title"),
48 content_title=_index_content_title(),
49 buttons=buttons,
50 classes="home",
51 )
52
53 return PageTemplateResponse(request, page)
54
55
56 @pageview_decorator
57 def agency_index(request, agency):
58 """View handler for an agency entry page."""
59 session.reset(request)
60 session.update(request, agency=agency, origin=agency.index_url)
61
62 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index"))
63 button.label = _("core.pages.agency_index.button.label")
64
65 page = viewmodels.Page(
66 title=_("core.pages.agency_index.title"),
67 content_title=_("core.pages.agency_index.content_title"),
68 button=button,
69 classes="home",
70 )
71
72 help_page = reverse("core:help")
73 context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}}
74
75 return TemplateResponse(request, "core/agency_index.html", context_dict)
76
77
78 @pageview_decorator
79 def help(request):
80 """View handler for the help page."""
81 if session.active_agency(request):
82 agency = session.agency(request)
83 buttons = viewmodels.Button.agency_contact_links(agency)
84 else:
85 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]
86
87 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
88
89 page = viewmodels.Page(
90 title=_("core.buttons.help"),
91 content_title=_("core.buttons.help"),
92 buttons=buttons,
93 noimage=True,
94 )
95
96 return TemplateResponse(request, "core/help.html", page.context_dict())
97
98
99 @pageview_decorator
100 def bad_request(request, exception, template_name="400.html"):
101 """View handler for HTTP 400 Bad Request responses."""
102 if session.active_agency(request):
103 session.update(request, origin=session.agency(request).index_url)
104 else:
105 session.update(request, origin=_index_url())
106
107 home = viewmodels.Button.home(request)
108 page = viewmodels.ErrorPage.error(button=home)
109 t = loader.get_template(template_name)
110
111 return HttpResponseBadRequest(t.render(page.context_dict()))
112
113
114 @pageview_decorator
115 def csrf_failure(request, reason):
116 """
117 View handler for CSRF_FAILURE_VIEW with custom data.
118 """
119 if session.active_agency(request):
120 session.update(request, origin=session.agency(request).index_url)
121 else:
122 session.update(request, origin=_index_url())
123
124 home = viewmodels.Button.home(request)
125 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
126 t = loader.get_template("400.html")
127
128 return HttpResponseNotFound(t.render(page.context_dict()))
129
130
131 @pageview_decorator
132 def page_not_found(request, exception, template_name="404.html"):
133 """View handler for HTTP 404 Not Found responses."""
134 if session.active_agency(request):
135 session.update(request, origin=session.agency(request).index_url)
136 else:
137 session.update(request, origin=_index_url())
138
139 home = viewmodels.Button.home(request)
140 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
141 t = loader.get_template(template_name)
142
143 return HttpResponseNotFound(t.render(page.context_dict()))
144
145
146 @pageview_decorator
147 def server_error(request, template_name="500.html"):
148 """View handler for HTTP 500 Server Error responses."""
149 if session.active_agency(request):
150 session.update(request, origin=session.agency(request).index_url)
151 else:
152 session.update(request, origin=_index_url())
153
154 home = viewmodels.Button.home(request)
155 page = viewmodels.ErrorPage.error(button=home)
156 t = loader.get_template(template_name)
157
158 return HttpResponseServerError(t.render(page.context_dict()))
159
[end of benefits/core/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -59,6 +59,9 @@
session.reset(request)
session.update(request, agency=agency, origin=agency.index_url)
+ if len(agency.eligibility_verifiers.all()) == 1:
+ return redirect(reverse("eligibility:index"))
+
button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index"))
button.label = _("core.pages.agency_index.button.label")
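Put together, the patched `agency_index` view in `benefits/core/views.py` would start roughly as sketched below. This is illustrative only; it assumes the `eligibility_verifiers` related manager that the diff relies on, and `agency.eligibility_verifiers.count() == 1` would be an equivalent, slightly cheaper way to express the same check.

```python
@pageview_decorator
def agency_index(request, agency):
    """View handler for an agency entry page."""
    session.reset(request)
    session.update(request, agency=agency, origin=agency.index_url)

    # Mirror the single-agency shortcut in index(): when there is only one
    # eligibility verifier there is nothing for the rider to choose here,
    # so redirect straight to the verifier selection flow.
    if len(agency.eligibility_verifiers.all()) == 1:
        return redirect(reverse("eligibility:index"))

    button = viewmodels.Button.primary(
        text=_("core.pages.index.continue"), url=reverse("eligibility:index")
    )
    button.label = _("core.pages.agency_index.button.label")
    # (remainder of the view unchanged from the listing above)
```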
|
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -59,6 +59,9 @@\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n \n+ if len(agency.eligibility_verifiers.all()) == 1:\n+ return redirect(reverse(\"eligibility:index\"))\n+\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n", "issue": "Engineering: Back-end - Combine logic for Agency Index and Getting Started page\n_As a MST Senior Rider, when I go to benefits.calitp.org, I should see the new Introduction page design and content._\r\n\r\n- Change the app logic to the following -\r\n1. When there is only 1 transit agency, go directly to the agency index page (/mst)\r\n2. From the agency index page, go directly to the verifier selection page (/eligibility/)\r\n3. When there is only 1 eligibility verifier, go directly to the only start page (Login.gov verifier)\r\n\r\n- Write tests for this new logic\r\n- Update sample data for this new logic (??) so we can adequately test this in Dev, Staging\r\n\r\n## How it is now\r\n\r\n```mermaid\r\ngraph LR\r\n A[core:index] -->|redirect| B(core:agency_index)\r\n B -->|user clicks button| C(eligiblity:index)\r\n C -->|redirect| D(eligibility:start)\r\n```\r\n\r\n## After this is implemented\r\n\r\n```mermaid\r\ngraph LR\r\n A[core:index] -->|redirect| B(core:agency_index)\r\n B -->|*redirect*| C(eligiblity:index)\r\n C -->|redirect| D(eligibility:start)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . 
import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.pages.index.content_title\")\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_index_content_title(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(\"core:help\")\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, \"core/agency_index.html\", context_dict)\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n noimage=True,\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = 
viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]}
| 2,296 | 133 |
gh_patches_debug_24104
|
rasdani/github-patches
|
git_diff
|
piskvorky__gensim-1845
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
D2VTransformer.fit_transform doesn't work
The **X** parameter of the **fit_transform** method of **D2VTransformer** doesn't accept variables of any type, nor list of token lists (raises _AttributeError: 'list' object has no attribute 'words'_), nor list of TaggedDocument (raises _TypeError: sequence item 0: expected str instance, list found_).
Example:
```python
from gensim.sklearn_api import D2VTransformer
from gensim.models import doc2vec
class_dict = {'mathematics': 1, 'physics': 0}
train_data = [
(['calculus', 'mathematical'], 'mathematics'), (['geometry', 'operations', 'curves'], 'mathematics'),
(['natural', 'nuclear'], 'physics'), (['science', 'electromagnetism', 'natural'], 'physics')
]
d2v_sentences = [doc2vec.TaggedDocument(words[0], [i]) for i, words in enumerate(train_data)]
train_input = list(map(lambda x: x[0], train_data))
train_target = list(map(lambda x: class_dict[x[1]], train_data))
model = D2VTransformer(min_count=1)
model.fit_transform(train_input, train_target)
#model.fit_transform(d2v_sentences, train_target)
```
Versions:
Windows-10-10.0.16299-SP0
Python 3.6.4 | packaged by conda-forge | (default, Dec 24 2017, 10:11:43) [MSC v.1900 64 bit (AMD64)]
NumPy 1.13.3
SciPy 0.19.1
gensim 3.2.0
FAST_VERSION 1
</issue>
<code>
[start of gensim/sklearn_api/d2vmodel.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2011 Radim Rehurek <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """
8 Scikit learn interface for gensim for easy use of gensim with scikit-learn
9 Follows scikit-learn API conventions
10 """
11
12 import numpy as np
13 from six import string_types
14 from sklearn.base import TransformerMixin, BaseEstimator
15 from sklearn.exceptions import NotFittedError
16
17 from gensim import models
18
19
20 class D2VTransformer(TransformerMixin, BaseEstimator):
21 """
22 Base Doc2Vec module
23 """
24
25 def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,
26 docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,
27 max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,
28 hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):
29 """
30 Sklearn api for Doc2Vec model. See gensim.models.Doc2Vec and gensim.models.Word2Vec for parameter details.
31 """
32 self.gensim_model = None
33 self.dm_mean = dm_mean
34 self.dm = dm
35 self.dbow_words = dbow_words
36 self.dm_concat = dm_concat
37 self.dm_tag_count = dm_tag_count
38 self.docvecs = docvecs
39 self.docvecs_mapfile = docvecs_mapfile
40 self.comment = comment
41 self.trim_rule = trim_rule
42
43 # attributes associated with gensim.models.Word2Vec
44 self.size = size
45 self.alpha = alpha
46 self.window = window
47 self.min_count = min_count
48 self.max_vocab_size = max_vocab_size
49 self.sample = sample
50 self.seed = seed
51 self.workers = workers
52 self.min_alpha = min_alpha
53 self.hs = hs
54 self.negative = negative
55 self.cbow_mean = int(cbow_mean)
56 self.hashfxn = hashfxn
57 self.iter = iter
58 self.sorted_vocab = sorted_vocab
59 self.batch_words = batch_words
60
61 def fit(self, X, y=None):
62 """
63 Fit the model according to the given training data.
64 Calls gensim.models.Doc2Vec
65 """
66 self.gensim_model = models.Doc2Vec(
67 documents=X, dm_mean=self.dm_mean, dm=self.dm,
68 dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,
69 docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,
70 trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,
71 min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,
72 seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,
73 negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,
74 iter=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words
75 )
76 return self
77
78 def transform(self, docs):
79 """
80 Return the vector representations for the input documents.
81 The input `docs` should be a list of lists like
82 [['calculus', 'mathematical'],
83 ['geometry', 'operations', 'curves']]
84 or a single document like : ['calculus', 'mathematical']
85 """
86 if self.gensim_model is None:
87 raise NotFittedError(
88 "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
89 )
90
91 # The input as array of array
92 if isinstance(docs[0], string_types):
93 docs = [docs]
94 vectors = [self.gensim_model.infer_vector(doc) for doc in docs]
95 return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))
96
[end of gensim/sklearn_api/d2vmodel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gensim/sklearn_api/d2vmodel.py b/gensim/sklearn_api/d2vmodel.py
--- a/gensim/sklearn_api/d2vmodel.py
+++ b/gensim/sklearn_api/d2vmodel.py
@@ -15,6 +15,7 @@
from sklearn.exceptions import NotFittedError
from gensim import models
+from gensim.models import doc2vec
class D2VTransformer(TransformerMixin, BaseEstimator):
@@ -63,8 +64,12 @@
Fit the model according to the given training data.
Calls gensim.models.Doc2Vec
"""
+ if isinstance(X[0], doc2vec.TaggedDocument):
+ d2v_sentences = X
+ else:
+ d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]
self.gensim_model = models.Doc2Vec(
- documents=X, dm_mean=self.dm_mean, dm=self.dm,
+ documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,
dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,
docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,
trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,
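The essence of the fix is normalising the input before it reaches `Doc2Vec`: plain token lists get wrapped into `TaggedDocument` objects, while inputs that already are `TaggedDocument`s pass through unchanged. A minimal stand-alone sketch of that normalisation, reusing the data from the issue (the helper name is illustrative, not part of the library):

```python
from gensim.models import doc2vec


def as_tagged_documents(docs):
    """Wrap raw token lists as TaggedDocument; pass TaggedDocuments through."""
    if isinstance(docs[0], doc2vec.TaggedDocument):
        return list(docs)
    return [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(docs)]


train_input = [
    ["calculus", "mathematical"],
    ["geometry", "operations", "curves"],
    ["natural", "nuclear"],
    ["science", "electromagnetism", "natural"],
]
documents = as_tagged_documents(train_input)  # same result for either input style
```

With this applied inside `fit`, both calls from the issue — `fit_transform(train_input, train_target)` and `fit_transform(d2v_sentences, train_target)` — hand `Doc2Vec` the `TaggedDocument` sequence it expects.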
|
{"golden_diff": "diff --git a/gensim/sklearn_api/d2vmodel.py b/gensim/sklearn_api/d2vmodel.py\n--- a/gensim/sklearn_api/d2vmodel.py\n+++ b/gensim/sklearn_api/d2vmodel.py\n@@ -15,6 +15,7 @@\n from sklearn.exceptions import NotFittedError\n \n from gensim import models\n+from gensim.models import doc2vec\n \n \n class D2VTransformer(TransformerMixin, BaseEstimator):\n@@ -63,8 +64,12 @@\n Fit the model according to the given training data.\n Calls gensim.models.Doc2Vec\n \"\"\"\n+ if isinstance(X[0], doc2vec.TaggedDocument):\n+ d2v_sentences = X\n+ else:\n+ d2v_sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(X)]\n self.gensim_model = models.Doc2Vec(\n- documents=X, dm_mean=self.dm_mean, dm=self.dm,\n+ documents=d2v_sentences, dm_mean=self.dm_mean, dm=self.dm,\n dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,\n docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,\n trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,\n", "issue": "D2VTransformer.fit_transform doesn't work\nThe **X** parameter of the **fit_transform** method of **D2VTransformer** doesn't accept variables of any type, nor list of token lists (raises _AttributeError: 'list' object has no attribute 'words'_), nor list of TaggedDocument (raises _TypeError: sequence item 0: expected str instance, list found_).\r\n\r\nExample:\r\n```python\r\nfrom gensim.sklearn_api import D2VTransformer\r\nfrom gensim.models import doc2vec\r\n\r\nclass_dict = {'mathematics': 1, 'physics': 0}\r\ntrain_data = [\r\n (['calculus', 'mathematical'], 'mathematics'), (['geometry', 'operations', 'curves'], 'mathematics'),\r\n (['natural', 'nuclear'], 'physics'), (['science', 'electromagnetism', 'natural'], 'physics')\r\n]\r\nd2v_sentences = [doc2vec.TaggedDocument(words[0], [i]) for i, words in enumerate(train_data)]\r\ntrain_input = list(map(lambda x: x[0], train_data))\r\ntrain_target = list(map(lambda x: class_dict[x[1]], train_data))\r\n\r\nmodel = D2VTransformer(min_count=1)\r\nmodel.fit_transform(train_input, train_target)\r\n#model.fit_transform(d2v_sentences, train_target)\r\n```\r\nVersions:\r\nWindows-10-10.0.16299-SP0\r\nPython 3.6.4 | packaged by conda-forge | (default, Dec 24 2017, 10:11:43) [MSC v.1900 64 bit (AMD64)]\r\nNumPy 1.13.3\r\nSciPy 0.19.1\r\ngensim 3.2.0\r\nFAST_VERSION 1\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2011 Radim Rehurek <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nScikit learn interface for gensim for easy use of gensim with scikit-learn\nFollows scikit-learn API conventions\n\"\"\"\n\nimport numpy as np\nfrom six import string_types\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.exceptions import NotFittedError\n\nfrom gensim import models\n\n\nclass D2VTransformer(TransformerMixin, BaseEstimator):\n \"\"\"\n Base Doc2Vec module\n \"\"\"\n\n def __init__(self, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None,\n docvecs_mapfile=None, comment=None, trim_rule=None, size=100, alpha=0.025, window=5, min_count=5,\n max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001, hs=0, negative=5, cbow_mean=1,\n hashfxn=hash, iter=5, sorted_vocab=1, batch_words=10000):\n \"\"\"\n Sklearn api for Doc2Vec model. 
See gensim.models.Doc2Vec and gensim.models.Word2Vec for parameter details.\n \"\"\"\n self.gensim_model = None\n self.dm_mean = dm_mean\n self.dm = dm\n self.dbow_words = dbow_words\n self.dm_concat = dm_concat\n self.dm_tag_count = dm_tag_count\n self.docvecs = docvecs\n self.docvecs_mapfile = docvecs_mapfile\n self.comment = comment\n self.trim_rule = trim_rule\n\n # attributes associated with gensim.models.Word2Vec\n self.size = size\n self.alpha = alpha\n self.window = window\n self.min_count = min_count\n self.max_vocab_size = max_vocab_size\n self.sample = sample\n self.seed = seed\n self.workers = workers\n self.min_alpha = min_alpha\n self.hs = hs\n self.negative = negative\n self.cbow_mean = int(cbow_mean)\n self.hashfxn = hashfxn\n self.iter = iter\n self.sorted_vocab = sorted_vocab\n self.batch_words = batch_words\n\n def fit(self, X, y=None):\n \"\"\"\n Fit the model according to the given training data.\n Calls gensim.models.Doc2Vec\n \"\"\"\n self.gensim_model = models.Doc2Vec(\n documents=X, dm_mean=self.dm_mean, dm=self.dm,\n dbow_words=self.dbow_words, dm_concat=self.dm_concat, dm_tag_count=self.dm_tag_count,\n docvecs=self.docvecs, docvecs_mapfile=self.docvecs_mapfile, comment=self.comment,\n trim_rule=self.trim_rule, size=self.size, alpha=self.alpha, window=self.window,\n min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,\n seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, hs=self.hs,\n negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn,\n iter=self.iter, sorted_vocab=self.sorted_vocab, batch_words=self.batch_words\n )\n return self\n\n def transform(self, docs):\n \"\"\"\n Return the vector representations for the input documents.\n The input `docs` should be a list of lists like\n [['calculus', 'mathematical'],\n ['geometry', 'operations', 'curves']]\n or a single document like : ['calculus', 'mathematical']\n \"\"\"\n if self.gensim_model is None:\n raise NotFittedError(\n \"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.\"\n )\n\n # The input as array of array\n if isinstance(docs[0], string_types):\n docs = [docs]\n vectors = [self.gensim_model.infer_vector(doc) for doc in docs]\n return np.reshape(np.array(vectors), (len(docs), self.gensim_model.vector_size))\n", "path": "gensim/sklearn_api/d2vmodel.py"}]}
| 2,063 | 309 |
gh_patches_debug_18562
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-1424
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cache does not get invalidated correctly when LanguageTreeNode is deleted
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
If you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.
Cache does not get invalidated correctly when LanguageTreeNode is deleted
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
If you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.
</issue>
<code>
[start of integreat_cms/cms/views/language_tree/language_tree_actions.py]
1 """
2 This module contains view actions for the language tree.
3 Typically, they do not render a whole page, but only parts of it or they redirect to regular views.
4 """
5 import logging
6
7 from django.contrib import messages
8 from django.shortcuts import redirect, get_object_or_404
9 from django.utils.translation import ugettext as _
10 from django.views.decorators.http import require_POST
11 from django.db import transaction
12
13 from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant
14
15 from ...constants import position
16 from ...decorators import permission_required
17 from ...models import LanguageTreeNode
18
19 logger = logging.getLogger(__name__)
20
21
22 @require_POST
23 @permission_required("cms.change_languagetreenode")
24 @transaction.atomic
25 def move_language_tree_node(
26 request, region_slug, language_tree_node_id, target_id, target_position
27 ):
28 """
29 This action moves the given language tree node to the given position relative to the given target.
30
31 :param request: The current request
32 :type request: ~django.http.HttpResponse
33
34 :param region_slug: The slug of the region which language tree should be modified
35 :type region_slug: str
36
37 :param language_tree_node_id: The id of the language tree node which should be moved
38 :type language_tree_node_id: int
39
40 :param target_id: The id of the target language tree node
41 :type target_id: int
42
43 :param target_position: The desired position (choices: :mod:`~integreat_cms.cms.constants.position`)
44 :type target_position: str
45
46 :return: A redirection to the language tree
47 :rtype: ~django.http.HttpResponseRedirect
48 """
49
50 region = request.region
51 language_tree_node = get_object_or_404(
52 region.language_tree_nodes, id=language_tree_node_id
53 )
54 target = get_object_or_404(region.language_tree_nodes, id=target_id)
55
56 try:
57 if target.depth == 1 and target_position in [position.LEFT, position.RIGHT]:
58 raise InvalidPosition(_("A region can only have one root language."))
59 language_tree_node.move(target, target_position)
60 # Call the save method on the (reloaded) node in order to trigger possible signal handlers etc.
61 # (The move()-method executes raw sql which might cause problems if the instance isn't fetched again)
62 language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)
63 language_tree_node.save()
64 messages.success(
65 request,
66 _('The language tree node "{}" was successfully moved.').format(
67 language_tree_node.translated_name
68 ),
69 )
70 logger.debug(
71 "%r moved to %r of %r by %r",
72 language_tree_node,
73 target_position,
74 target,
75 request.user,
76 )
77 except (ValueError, InvalidPosition, InvalidMoveToDescendant) as e:
78 messages.error(request, e)
79 logger.exception(e)
80
81 return redirect("language_tree", **{"region_slug": region_slug})
82
83
84 @require_POST
85 @permission_required("cms.delete_languagetreenode")
86 @transaction.atomic
87 def delete_language_tree_node(request, region_slug, language_tree_node_id):
88 """
89 Deletes the language node of distinct region
90 and all page translations for this language
91
92 :param request: The current request
93 :type request: ~django.http.HttpResponse
94
95 :param region_slug: The slug of the region which language node should be deleted
96 :type region_slug: str
97
98 :param language_tree_node_id: The id of the language tree node which should be deleted
99 :type language_tree_node_id: int
100
101 :return: A redirection to the language tree
102 :rtype: ~django.http.HttpResponseRedirect
103 """
104 # get current region
105 region = request.region
106 # get current selected language node
107 language_node = get_object_or_404(
108 region.language_tree_nodes, id=language_tree_node_id
109 )
110 # get all page translation assigned to the language node
111 page_translations = language_node.language.page_translations
112 # filter those translation that belong to the region and delete them
113 page_translations.filter(page__region=region).delete()
114 # get all event translation assigned to the language node
115 event_translations = language_node.language.event_translations
116 # filter those translation that belong to the region and delete them
117 event_translations.filter(event__region=region).delete()
118 # get all poi translation assigned to the language node
119 poi_translations = language_node.language.poi_translations
120 # filter those translation that belong to the region and delete them
121 poi_translations.filter(poi__region=region).delete()
122 # get all push notification translation assigned to the language node
123 push_notification_translations = (
124 language_node.language.push_notification_translations
125 )
126 # filter those translation that belong to the region and delete them
127 push_notification_translations.filter(push_notification__region=region).delete()
128
129 logger.debug("%r deleted by %r", language_node, request.user)
130 language_node.delete()
131 messages.success(
132 request,
133 _(
134 'The language tree node "{}" and all corresponding translations were successfully deleted.'
135 ).format(language_node.translated_name),
136 )
137 return redirect("language_tree", **{"region_slug": region_slug})
138
[end of integreat_cms/cms/views/language_tree/language_tree_actions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/integreat_cms/cms/views/language_tree/language_tree_actions.py b/integreat_cms/cms/views/language_tree/language_tree_actions.py
--- a/integreat_cms/cms/views/language_tree/language_tree_actions.py
+++ b/integreat_cms/cms/views/language_tree/language_tree_actions.py
@@ -11,6 +11,7 @@
from django.db import transaction
from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant
+from cacheops import invalidate_obj
from ...constants import position
from ...decorators import permission_required
@@ -128,6 +129,16 @@
logger.debug("%r deleted by %r", language_node, request.user)
language_node.delete()
+
+ for page in region.pages.all():
+ invalidate_obj(page)
+ for event in region.events.all():
+ invalidate_obj(event)
+ for poi in region.pois.all():
+ invalidate_obj(poi)
+ for push_notification in region.push_notifications.all():
+ invalidate_obj(push_notification)
+
messages.success(
request,
_(
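The pattern the patch applies is to explicitly drop cached objects whose per-region translation status may still be cached after the language tree node is gone. A helper-style sketch of the same idea (assuming django-cacheops' `invalidate_obj` and the `pages` / `events` / `pois` / `push_notifications` related managers used in the diff; the helper name is illustrative):

```python
from cacheops import invalidate_obj


def invalidate_region_content_cache(region):
    """Drop cacheops entries for every translatable object of a region."""
    querysets = (
        region.pages.all(),
        region.events.all(),
        region.pois.all(),
        region.push_notifications.all(),
    )
    for queryset in querysets:
        for obj in queryset:
            # Cached page/event/POI data still reports a translation status
            # for the deleted language until its cache entry is invalidated.
            invalidate_obj(obj)
```

Calling such a helper right after `language_node.delete()` is equivalent to the inline loops in the diff.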
|
{"golden_diff": "diff --git a/integreat_cms/cms/views/language_tree/language_tree_actions.py b/integreat_cms/cms/views/language_tree/language_tree_actions.py\n--- a/integreat_cms/cms/views/language_tree/language_tree_actions.py\n+++ b/integreat_cms/cms/views/language_tree/language_tree_actions.py\n@@ -11,6 +11,7 @@\n from django.db import transaction\n \n from treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant\n+from cacheops import invalidate_obj\n \n from ...constants import position\n from ...decorators import permission_required\n@@ -128,6 +129,16 @@\n \n logger.debug(\"%r deleted by %r\", language_node, request.user)\n language_node.delete()\n+\n+ for page in region.pages.all():\n+ invalidate_obj(page)\n+ for event in region.events.all():\n+ invalidate_obj(event)\n+ for poi in region.pois.all():\n+ invalidate_obj(poi)\n+ for push_notification in region.push_notifications.all():\n+ invalidate_obj(push_notification)\n+\n messages.success(\n request,\n _(\n", "issue": "Cache does not get invalidated correctly when LanguageTreeNode is deleted\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nIf you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.\r\n\nCache does not get invalidated correctly when LanguageTreeNode is deleted\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nIf you delete a language the page tree still shows the translation status of the corresponding language. This is probably a caching issue.\r\n\n", "before_files": [{"content": "\"\"\"\nThis module contains view actions for the language tree.\nTypically, they do not render a whole page, but only parts of it or they redirect to regular views.\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\nfrom django.db import transaction\n\nfrom treebeard.exceptions import InvalidPosition, InvalidMoveToDescendant\n\nfrom ...constants import position\nfrom ...decorators import permission_required\nfrom ...models import LanguageTreeNode\n\nlogger = logging.getLogger(__name__)\n\n\n@require_POST\n@permission_required(\"cms.change_languagetreenode\")\[email protected]\ndef move_language_tree_node(\n request, region_slug, language_tree_node_id, target_id, target_position\n):\n \"\"\"\n This action moves the given language tree node to the given position relative to the given target.\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param region_slug: The slug of the region which language tree should be modified\n :type region_slug: str\n\n :param language_tree_node_id: The id of the language tree node which should be moved\n :type language_tree_node_id: int\n\n :param target_id: The id of the target language tree node\n :type target_id: int\n\n :param target_position: The desired position (choices: :mod:`~integreat_cms.cms.constants.position`)\n :type target_position: str\n\n :return: A redirection to the language tree\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n\n region = request.region\n language_tree_node = get_object_or_404(\n region.language_tree_nodes, id=language_tree_node_id\n )\n target = get_object_or_404(region.language_tree_nodes, id=target_id)\n\n try:\n if target.depth == 1 and target_position in [position.LEFT, 
position.RIGHT]:\n raise InvalidPosition(_(\"A region can only have one root language.\"))\n language_tree_node.move(target, target_position)\n # Call the save method on the (reloaded) node in order to trigger possible signal handlers etc.\n # (The move()-method executes raw sql which might cause problems if the instance isn't fetched again)\n language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)\n language_tree_node.save()\n messages.success(\n request,\n _('The language tree node \"{}\" was successfully moved.').format(\n language_tree_node.translated_name\n ),\n )\n logger.debug(\n \"%r moved to %r of %r by %r\",\n language_tree_node,\n target_position,\n target,\n request.user,\n )\n except (ValueError, InvalidPosition, InvalidMoveToDescendant) as e:\n messages.error(request, e)\n logger.exception(e)\n\n return redirect(\"language_tree\", **{\"region_slug\": region_slug})\n\n\n@require_POST\n@permission_required(\"cms.delete_languagetreenode\")\[email protected]\ndef delete_language_tree_node(request, region_slug, language_tree_node_id):\n \"\"\"\n Deletes the language node of distinct region\n and all page translations for this language\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param region_slug: The slug of the region which language node should be deleted\n :type region_slug: str\n\n :param language_tree_node_id: The id of the language tree node which should be deleted\n :type language_tree_node_id: int\n\n :return: A redirection to the language tree\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # get current region\n region = request.region\n # get current selected language node\n language_node = get_object_or_404(\n region.language_tree_nodes, id=language_tree_node_id\n )\n # get all page translation assigned to the language node\n page_translations = language_node.language.page_translations\n # filter those translation that belong to the region and delete them\n page_translations.filter(page__region=region).delete()\n # get all event translation assigned to the language node\n event_translations = language_node.language.event_translations\n # filter those translation that belong to the region and delete them\n event_translations.filter(event__region=region).delete()\n # get all poi translation assigned to the language node\n poi_translations = language_node.language.poi_translations\n # filter those translation that belong to the region and delete them\n poi_translations.filter(poi__region=region).delete()\n # get all push notification translation assigned to the language node\n push_notification_translations = (\n language_node.language.push_notification_translations\n )\n # filter those translation that belong to the region and delete them\n push_notification_translations.filter(push_notification__region=region).delete()\n\n logger.debug(\"%r deleted by %r\", language_node, request.user)\n language_node.delete()\n messages.success(\n request,\n _(\n 'The language tree node \"{}\" and all corresponding translations were successfully deleted.'\n ).format(language_node.translated_name),\n )\n return redirect(\"language_tree\", **{\"region_slug\": region_slug})\n", "path": "integreat_cms/cms/views/language_tree/language_tree_actions.py"}]}
| 2,102 | 232 |
gh_patches_debug_16723
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-2915
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot edit or delete challenge pages with slug `evaluation`
</issue>
<code>
[start of app/grandchallenge/pages/forms.py]
1 from django import forms
2 from django.db.models import BLANK_CHOICE_DASH
3 from django_summernote.widgets import SummernoteInplaceWidget
4
5 from grandchallenge.core.forms import SaveFormInitMixin
6 from grandchallenge.pages.models import Page
7
8
9 class PageCreateForm(SaveFormInitMixin, forms.ModelForm):
10 def __init__(self, *args, challenge, **kwargs):
11 self.challenge = challenge
12 super().__init__(*args, **kwargs)
13
14 class Meta:
15 model = Page
16 fields = (
17 "display_title",
18 "permission_level",
19 "hidden",
20 "html",
21 )
22 widgets = {"html": SummernoteInplaceWidget()}
23 help_texts = {
24 "html": (
25 "The content of your page. <b>Please note</b>: your html will "
26 "be filtered after it has been saved to remove any non-HTML5 "
27 "compliant markup and scripts. The filtering is not reflected "
28 "in the live view so please <b>check the rendering of your "
29 "page after you click save</b>. If you're going to paste from "
30 "another source such as MS Word, please <b>paste without "
31 "formatting</b> using <b>CTRL+SHIFT+V</b> on Windows or "
32 "<b>⇧+⌥+⌘+V</b> on OS X."
33 )
34 }
35
36
37 class PageUpdateForm(PageCreateForm):
38 """Like the page update form but you can also move the page."""
39
40 move = forms.CharField(widget=forms.Select)
41 move.required = False
42 move.widget.choices = (
43 (BLANK_CHOICE_DASH[0]),
44 (Page.FIRST, "First"),
45 (Page.UP, "Up"),
46 (Page.DOWN, "Down"),
47 (Page.LAST, "Last"),
48 )
49
[end of app/grandchallenge/pages/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/grandchallenge/pages/forms.py b/app/grandchallenge/pages/forms.py
--- a/app/grandchallenge/pages/forms.py
+++ b/app/grandchallenge/pages/forms.py
@@ -1,4 +1,5 @@
from django import forms
+from django.core.exceptions import ValidationError
from django.db.models import BLANK_CHOICE_DASH
from django_summernote.widgets import SummernoteInplaceWidget
@@ -33,6 +34,17 @@
)
}
+ def clean_display_title(self):
+ display_title = self.cleaned_data["display_title"]
+
+ if display_title.lower() in {"evaluation"}:
+ # evaluation results in a URL clash, especially with the update page.
+ raise ValidationError(
+ "Title not allowed, please select an alternative"
+ )
+
+ return display_title
+
class PageUpdateForm(PageCreateForm):
"""Like the page update form but you can also move the page."""
|
{"golden_diff": "diff --git a/app/grandchallenge/pages/forms.py b/app/grandchallenge/pages/forms.py\n--- a/app/grandchallenge/pages/forms.py\n+++ b/app/grandchallenge/pages/forms.py\n@@ -1,4 +1,5 @@\n from django import forms\n+from django.core.exceptions import ValidationError\n from django.db.models import BLANK_CHOICE_DASH\n from django_summernote.widgets import SummernoteInplaceWidget\n \n@@ -33,6 +34,17 @@\n )\n }\n \n+ def clean_display_title(self):\n+ display_title = self.cleaned_data[\"display_title\"]\n+\n+ if display_title.lower() in {\"evaluation\"}:\n+ # evaluation results in a URL clash, especially with the update page.\n+ raise ValidationError(\n+ \"Title not allowed, please select an alternative\"\n+ )\n+\n+ return display_title\n+\n \n class PageUpdateForm(PageCreateForm):\n \"\"\"Like the page update form but you can also move the page.\"\"\"\n", "issue": "Cannot edit or delete challenge pages with slug `evaluation`\n\n", "before_files": [{"content": "from django import forms\nfrom django.db.models import BLANK_CHOICE_DASH\nfrom django_summernote.widgets import SummernoteInplaceWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.pages.models import Page\n\n\nclass PageCreateForm(SaveFormInitMixin, forms.ModelForm):\n def __init__(self, *args, challenge, **kwargs):\n self.challenge = challenge\n super().__init__(*args, **kwargs)\n\n class Meta:\n model = Page\n fields = (\n \"display_title\",\n \"permission_level\",\n \"hidden\",\n \"html\",\n )\n widgets = {\"html\": SummernoteInplaceWidget()}\n help_texts = {\n \"html\": (\n \"The content of your page. <b>Please note</b>: your html will \"\n \"be filtered after it has been saved to remove any non-HTML5 \"\n \"compliant markup and scripts. The filtering is not reflected \"\n \"in the live view so please <b>check the rendering of your \"\n \"page after you click save</b>. If you're going to paste from \"\n \"another source such as MS Word, please <b>paste without \"\n \"formatting</b> using <b>CTRL+SHIFT+V</b> on Windows or \"\n \"<b>\u21e7+\u2325+\u2318+V</b> on OS X.\"\n )\n }\n\n\nclass PageUpdateForm(PageCreateForm):\n \"\"\"Like the page update form but you can also move the page.\"\"\"\n\n move = forms.CharField(widget=forms.Select)\n move.required = False\n move.widget.choices = (\n (BLANK_CHOICE_DASH[0]),\n (Page.FIRST, \"First\"),\n (Page.UP, \"Up\"),\n (Page.DOWN, \"Down\"),\n (Page.LAST, \"Last\"),\n )\n", "path": "app/grandchallenge/pages/forms.py"}]}
| 1,035 | 208 |
gh_patches_debug_8713
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-3133
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't save term translation after updating the term's text in DB
I've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)
https://pontoon.mozilla.org/a/terminology/term/666/change/
To my surprise, the term showed up as untranslated for Italian. The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053
Error in log is
```
Internal Server Error: /translations/create/
response = get_response(request)
File "/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
return func(request, *args, **kwargs)
File "/app/pontoon/base/utils.py", line 126, in wrap
return f(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/contextlib.py", line 79, in inner
File "/app/pontoon/translations/views.py", line 117, in create_translation
translation.save(failed_checks=failed_checks)
File "/app/pontoon/base/models.py", line 3429, in save
self.entity.reset_term_translation(self.locale)
File "/app/pontoon/base/models.py", line 2865, in reset_term_translation
term = self.term
pontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.
```
Can't save term translation after updating the term's text in DB
I've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)
https://pontoon.mozilla.org/a/terminology/term/666/change/
To my surprise, the term showed up as untranslated for Italian. The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053
Error in log is
```
Internal Server Error: /translations/create/
response = get_response(request)
File "/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
return func(request, *args, **kwargs)
File "/app/pontoon/base/utils.py", line 126, in wrap
return f(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/app/.heroku/python/lib/python3.9/contextlib.py", line 79, in inner
File "/app/pontoon/translations/views.py", line 117, in create_translation
translation.save(failed_checks=failed_checks)
File "/app/pontoon/base/models.py", line 3429, in save
self.entity.reset_term_translation(self.locale)
File "/app/pontoon/base/models.py", line 2865, in reset_term_translation
term = self.term
pontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.
```
</issue>
<code>
[start of pontoon/terminology/models.py]
1 import re
2
3 from django.db import models
4
5 from pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource
6
7
8 def update_terminology_project_stats():
9 resource = Resource.objects.get(project__slug="terminology")
10 project = resource.project
11 total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()
12 resource.total_strings = total_strings
13 resource.save(update_fields=["total_strings"])
14
15 translated_resources = list(TranslatedResource.objects.filter(resource=resource))
16
17 for translated_resource in translated_resources:
18 translated_resource.calculate_stats(save=False)
19
20 TranslatedResource.objects.bulk_update(
21 translated_resources,
22 [
23 "total_strings",
24 "approved_strings",
25 "pretranslated_strings",
26 "strings_with_errors",
27 "strings_with_warnings",
28 "unreviewed_strings",
29 ],
30 )
31
32 project.aggregate_stats()
33
34 for locale in project.locales.all():
35 locale.aggregate_stats()
36
37 for projectlocale in ProjectLocale.objects.filter(project=project):
38 projectlocale.aggregate_stats()
39
40
41 class TermQuerySet(models.QuerySet):
42 def for_string(self, string):
43 terms = []
44 available_terms = self.exclude(definition="").exclude(forbidden=True)
45
46 for term in available_terms:
47 term_text = r"\b" + re.escape(term.text)
48 flags = 0 if term.case_sensitive else re.IGNORECASE
49
50 if re.search(term_text, string, flags):
51 terms.append(term)
52
53 return terms
54
55 def delete(self, *args, **kwargs):
56 """
57 Before deleting Terms, obsolete their Entities
58 """
59 for term in self:
60 term.obsolete_entity()
61 update_terminology_project_stats()
62
63 super().delete(*args, **kwargs)
64
65
66 class Term(models.Model):
67 text = models.CharField(max_length=255)
68 entity = models.OneToOneField("base.Entity", models.SET_NULL, null=True, blank=True)
69
70 class PartOfSpeech(models.TextChoices):
71 ADJECTIVE = "adjective", "Adjective"
72 ADVERB = "adverb", "Adverb"
73 NOUN = "noun", "Noun"
74 VERB = "verb", "Verb"
75
76 part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)
77
78 definition = models.TextField(blank=True)
79 usage = models.TextField(blank=True)
80 notes = models.TextField(blank=True)
81
82 class Status(models.TextChoices):
83 APPROVED = "approved", "Approved"
84 NEW = "new", "New"
85 OBSOLETE = "obsolete", "Obsolete"
86 REVIEW = "review", "Review"
87
88 status = models.CharField(
89 max_length=20, choices=Status.choices, null=True, blank=True
90 )
91
92 case_sensitive = models.BooleanField(default=False)
93 do_not_translate = models.BooleanField(default=False)
94 forbidden = models.BooleanField(default=False)
95
96 created_at = models.DateTimeField(auto_now_add=True)
97 created_by = models.ForeignKey(
98 "auth.User", models.SET_NULL, related_name="terms", null=True, blank=True
99 )
100
101 objects = TermQuerySet.as_manager()
102
103 def translation(self, locale):
104 """
105 Get locale translation of the term.
106 """
107 if self.do_not_translate:
108 return self.text
109 else:
110 try:
111 return self.translations.get(locale=locale).text
112 except (AttributeError, TermTranslation.DoesNotExist):
113 return None
114
115 @property
116 def localizable(self):
117 """
118 Check if the term is localizable.
119 """
120 if self.do_not_translate:
121 return False
122
123 if self.forbidden:
124 return False
125
126 if self.definition == "":
127 return False
128
129 return True
130
131 def entity_comment(self):
132 """
133 Generate entity comment from the term.
134 """
135 comment = "{}. {}.".format(
136 self.part_of_speech.capitalize(),
137 self.definition.capitalize().rstrip("."),
138 )
139
140 if self.usage:
141 comment += " E.g. {}.".format(self.usage.capitalize().rstrip("."))
142
143 return comment
144
145 def create_entity(self):
146 """
147 An Entity must be created (or deobsoleted) for a Term according to the
148 following rules:
149 - Entity.string contains content of Term.text.
150 - Entity.comment contains joint content of several fields:
151 Term.part_of_speech. Term.definition. E.g.: Term.usage.
152 """
153 resource = Resource.objects.get(project__slug="terminology")
154
155 entity, created = Entity.objects.get_or_create(
156 string=self.text,
157 comment=self.entity_comment(),
158 resource=resource,
159 )
160
161 # Using update() to avoid circular Term.save() call
162 Term.objects.filter(pk=self.pk).update(entity_id=entity.id)
163
164 if not created:
165 entity.obsolete = False
166 entity.save(update_fields=["obsolete"])
167
168 # Make sure Term entities are ordered alphabetically
169 entities = list(
170 Entity.objects.filter(resource=resource, obsolete=False).order_by("string")
171 )
172 for index, e in enumerate(entities):
173 e.order = index
174 Entity.objects.bulk_update(entities, ["order"])
175
176 def obsolete_entity(self):
177 entity = self.entity
178
179 # Ignore if term doesn't have entity assigned
180 if entity is None:
181 return
182
183 entity.obsolete = True
184 entity.save(update_fields=["obsolete"])
185
186 def handle_term_update(self):
187 """
188 Before updating an existing Term, update its Entity if neccessary
189 """
190 term = self
191 old_term = Term.objects.get(pk=term.pk)
192
193 # Ignore changes to non-localizable terms that stay non-localizable
194 if not old_term.localizable and not term.localizable:
195 return
196
197 # If localizable term becomes non-localizable, obsolete its Entity
198 if old_term.localizable and not term.localizable:
199 old_term.obsolete_entity()
200
201 # If non-localizable term becomes localizable, create a corresponding Entity
202 elif not old_term.localizable and term.localizable:
203 term.create_entity()
204
205 # If relevant changes are made to the localizable term that stays localizable
206 else:
207 # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.
208 if old_term.text != term.text:
209 old_term.obsolete_entity()
210 term.create_entity()
211
212 # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment gets updated.
213 elif (
214 old_term.part_of_speech != term.part_of_speech
215 or old_term.definition != term.definition
216 or old_term.usage != term.usage
217 ):
218 entity = term.entity
219
220 # Ignore if term doesn't have entity assigned
221 if entity is None:
222 return
223
224 entity.comment = term.entity_comment()
225 entity.save(update_fields=["comment"])
226
227 return
228
229 update_terminology_project_stats()
230
231 def handle_term_create(self):
232 """
233 After creating a new localizable Term, create its Entity
234 """
235 self.create_entity()
236 update_terminology_project_stats()
237
238 def save(self, *args, **kwargs):
239 created = self.pk is None
240
241 if not created:
242 self.handle_term_update()
243
244 super().save(*args, **kwargs)
245
246 if created and self.localizable:
247 self.handle_term_create()
248
249 def delete(self, *args, **kwargs):
250 """
251 Before deleting a Term, obsolete its Entity
252 """
253 self.obsolete_entity()
254 update_terminology_project_stats()
255
256 super().delete(*args, **kwargs)
257
258 def __str__(self):
259 return self.text
260
261
262 class TermTranslation(models.Model):
263 term = models.ForeignKey(Term, models.CASCADE, related_name="translations")
264 locale = models.ForeignKey("base.Locale", models.CASCADE, related_name="terms")
265
266 text = models.CharField(max_length=255)
267
268 def __str__(self):
269 return self.text
270
[end of pontoon/terminology/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py
--- a/pontoon/terminology/models.py
+++ b/pontoon/terminology/models.py
@@ -160,10 +160,12 @@
# Using update() to avoid circular Term.save() call
Term.objects.filter(pk=self.pk).update(entity_id=entity.id)
+ entity.term = self
if not created:
entity.obsolete = False
- entity.save(update_fields=["obsolete"])
+
+ entity.save()
# Make sure Term entities are ordered alphabetically
entities = list(
|
{"golden_diff": "diff --git a/pontoon/terminology/models.py b/pontoon/terminology/models.py\n--- a/pontoon/terminology/models.py\n+++ b/pontoon/terminology/models.py\n@@ -160,10 +160,12 @@\n \n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n+ entity.term = self\n \n if not created:\n entity.obsolete = False\n- entity.save(update_fields=[\"obsolete\"])\n+\n+ entity.save()\n \n # Make sure Term entities are ordered alphabetically\n entities = list(\n", "issue": "Can't save term translation after updating the term's text in DB\nI've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)\r\nhttps://pontoon.mozilla.org/a/terminology/term/666/change/\r\n\r\nTo my surprise, the term showed up as untranslated for Italian. The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053\r\n\r\nError in log is \r\n\r\n```\r\nInternal Server Error: /translations/create/\r\n response = get_response(request)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n return func(request, *args, **kwargs)\r\n File \"/app/pontoon/base/utils.py\", line 126, in wrap\r\n return f(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/contextlib.py\", line 79, in inner\r\n File \"/app/pontoon/translations/views.py\", line 117, in create_translation\r\n translation.save(failed_checks=failed_checks)\r\n File \"/app/pontoon/base/models.py\", line 3429, in save\r\n self.entity.reset_term_translation(self.locale)\r\n File \"/app/pontoon/base/models.py\", line 2865, in reset_term_translation\r\n term = self.term\r\npontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.\r\n```\nCan't save term translation after updating the term's text in DB\nI've changed the term's text to `Review checker`, since that's the correct capitalization in English (changed from the original)\r\nhttps://pontoon.mozilla.org/a/terminology/term/666/change/\r\n\r\nTo my surprise, the term showed up as untranslated for Italian. 
The other problem is that I cannot save the translation: https://pontoon.mozilla.org/it/terminology/all-resources/?status=missing&string=301053\r\n\r\nError in log is \r\n\r\n```\r\nInternal Server Error: /translations/create/\r\n response = get_response(request)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n return func(request, *args, **kwargs)\r\n File \"/app/pontoon/base/utils.py\", line 126, in wrap\r\n return f(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/app/.heroku/python/lib/python3.9/contextlib.py\", line 79, in inner\r\n File \"/app/pontoon/translations/views.py\", line 117, in create_translation\r\n translation.save(failed_checks=failed_checks)\r\n File \"/app/pontoon/base/models.py\", line 3429, in save\r\n self.entity.reset_term_translation(self.locale)\r\n File \"/app/pontoon/base/models.py\", line 2865, in reset_term_translation\r\n term = self.term\r\npontoon.base.models.Entity.term.RelatedObjectDoesNotExist: Entity has no term.\r\n```\n", "before_files": [{"content": "import re\n\nfrom django.db import models\n\nfrom pontoon.base.models import Entity, ProjectLocale, Resource, TranslatedResource\n\n\ndef update_terminology_project_stats():\n resource = Resource.objects.get(project__slug=\"terminology\")\n project = resource.project\n total_strings = Entity.objects.filter(resource=resource, obsolete=False).count()\n resource.total_strings = total_strings\n resource.save(update_fields=[\"total_strings\"])\n\n translated_resources = list(TranslatedResource.objects.filter(resource=resource))\n\n for translated_resource in translated_resources:\n translated_resource.calculate_stats(save=False)\n\n TranslatedResource.objects.bulk_update(\n translated_resources,\n [\n \"total_strings\",\n \"approved_strings\",\n \"pretranslated_strings\",\n \"strings_with_errors\",\n \"strings_with_warnings\",\n \"unreviewed_strings\",\n ],\n )\n\n project.aggregate_stats()\n\n for locale in project.locales.all():\n locale.aggregate_stats()\n\n for projectlocale in ProjectLocale.objects.filter(project=project):\n projectlocale.aggregate_stats()\n\n\nclass TermQuerySet(models.QuerySet):\n def for_string(self, string):\n terms = []\n available_terms = self.exclude(definition=\"\").exclude(forbidden=True)\n\n for term in available_terms:\n term_text = r\"\\b\" + re.escape(term.text)\n flags = 0 if term.case_sensitive else re.IGNORECASE\n\n if re.search(term_text, string, flags):\n terms.append(term)\n\n return terms\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting Terms, obsolete their Entities\n \"\"\"\n for term in self:\n term.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n\nclass Term(models.Model):\n text = models.CharField(max_length=255)\n entity = models.OneToOneField(\"base.Entity\", models.SET_NULL, null=True, blank=True)\n\n class PartOfSpeech(models.TextChoices):\n ADJECTIVE = \"adjective\", \"Adjective\"\n ADVERB = \"adverb\", \"Adverb\"\n NOUN = \"noun\", \"Noun\"\n VERB = \"verb\", \"Verb\"\n\n part_of_speech = models.CharField(max_length=50, choices=PartOfSpeech.choices)\n\n definition = models.TextField(blank=True)\n usage = models.TextField(blank=True)\n notes = models.TextField(blank=True)\n\n class Status(models.TextChoices):\n APPROVED = \"approved\", \"Approved\"\n NEW = \"new\", 
\"New\"\n OBSOLETE = \"obsolete\", \"Obsolete\"\n REVIEW = \"review\", \"Review\"\n\n status = models.CharField(\n max_length=20, choices=Status.choices, null=True, blank=True\n )\n\n case_sensitive = models.BooleanField(default=False)\n do_not_translate = models.BooleanField(default=False)\n forbidden = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n created_by = models.ForeignKey(\n \"auth.User\", models.SET_NULL, related_name=\"terms\", null=True, blank=True\n )\n\n objects = TermQuerySet.as_manager()\n\n def translation(self, locale):\n \"\"\"\n Get locale translation of the term.\n \"\"\"\n if self.do_not_translate:\n return self.text\n else:\n try:\n return self.translations.get(locale=locale).text\n except (AttributeError, TermTranslation.DoesNotExist):\n return None\n\n @property\n def localizable(self):\n \"\"\"\n Check if the term is localizable.\n \"\"\"\n if self.do_not_translate:\n return False\n\n if self.forbidden:\n return False\n\n if self.definition == \"\":\n return False\n\n return True\n\n def entity_comment(self):\n \"\"\"\n Generate entity comment from the term.\n \"\"\"\n comment = \"{}. {}.\".format(\n self.part_of_speech.capitalize(),\n self.definition.capitalize().rstrip(\".\"),\n )\n\n if self.usage:\n comment += \" E.g. {}.\".format(self.usage.capitalize().rstrip(\".\"))\n\n return comment\n\n def create_entity(self):\n \"\"\"\n An Entity must be created (or deobsoleted) for a Term according to the\n following rules:\n - Entity.string contains content of Term.text.\n - Entity.comment contains joint content of several fields:\n Term.part_of_speech. Term.definition. E.g.: Term.usage.\n \"\"\"\n resource = Resource.objects.get(project__slug=\"terminology\")\n\n entity, created = Entity.objects.get_or_create(\n string=self.text,\n comment=self.entity_comment(),\n resource=resource,\n )\n\n # Using update() to avoid circular Term.save() call\n Term.objects.filter(pk=self.pk).update(entity_id=entity.id)\n\n if not created:\n entity.obsolete = False\n entity.save(update_fields=[\"obsolete\"])\n\n # Make sure Term entities are ordered alphabetically\n entities = list(\n Entity.objects.filter(resource=resource, obsolete=False).order_by(\"string\")\n )\n for index, e in enumerate(entities):\n e.order = index\n Entity.objects.bulk_update(entities, [\"order\"])\n\n def obsolete_entity(self):\n entity = self.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.obsolete = True\n entity.save(update_fields=[\"obsolete\"])\n\n def handle_term_update(self):\n \"\"\"\n Before updating an existing Term, update its Entity if neccessary\n \"\"\"\n term = self\n old_term = Term.objects.get(pk=term.pk)\n\n # Ignore changes to non-localizable terms that stay non-localizable\n if not old_term.localizable and not term.localizable:\n return\n\n # If localizable term becomes non-localizable, obsolete its Entity\n if old_term.localizable and not term.localizable:\n old_term.obsolete_entity()\n\n # If non-localizable term becomes localizable, create a corresponding Entity\n elif not old_term.localizable and term.localizable:\n term.create_entity()\n\n # If relevant changes are made to the localizable term that stays localizable\n else:\n # If Term.text changes, a new Entity instance gets created and the previous one becomes obsolete.\n if old_term.text != term.text:\n old_term.obsolete_entity()\n term.create_entity()\n\n # If Term.part_of_speech, Term.definition or Term.usage change, Entity.comment 
gets updated.\n elif (\n old_term.part_of_speech != term.part_of_speech\n or old_term.definition != term.definition\n or old_term.usage != term.usage\n ):\n entity = term.entity\n\n # Ignore if term doesn't have entity assigned\n if entity is None:\n return\n\n entity.comment = term.entity_comment()\n entity.save(update_fields=[\"comment\"])\n\n return\n\n update_terminology_project_stats()\n\n def handle_term_create(self):\n \"\"\"\n After creating a new localizable Term, create its Entity\n \"\"\"\n self.create_entity()\n update_terminology_project_stats()\n\n def save(self, *args, **kwargs):\n created = self.pk is None\n\n if not created:\n self.handle_term_update()\n\n super().save(*args, **kwargs)\n\n if created and self.localizable:\n self.handle_term_create()\n\n def delete(self, *args, **kwargs):\n \"\"\"\n Before deleting a Term, obsolete its Entity\n \"\"\"\n self.obsolete_entity()\n update_terminology_project_stats()\n\n super().delete(*args, **kwargs)\n\n def __str__(self):\n return self.text\n\n\nclass TermTranslation(models.Model):\n term = models.ForeignKey(Term, models.CASCADE, related_name=\"translations\")\n locale = models.ForeignKey(\"base.Locale\", models.CASCADE, related_name=\"terms\")\n\n text = models.CharField(max_length=255)\n\n def __str__(self):\n return self.text\n", "path": "pontoon/terminology/models.py"}]}
| 3,718 | 140 |
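The traceback in `gh_patches_debug_8713` ends in `Entity has no term`, and the golden diff responds by also assigning the reverse one-to-one relation on the freshly created entity and doing a full save instead of `save(update_fields=["obsolete"])`. The sketch below mirrors that shape; `link_term_to_entity` is an illustrative wrapper, not a function that exists in Pontoon.

```python
def link_term_to_entity(term, entity, created):
    # Writing the link only via a queryset update() bypasses the in-memory
    # ``entity``, so a later read of ``entity.term`` can raise
    # RelatedObjectDoesNotExist (the error in the traceback above).
    type(term).objects.filter(pk=term.pk).update(entity_id=entity.id)

    entity.term = term      # populate the reverse one-to-one on this instance
    if not created:
        entity.obsolete = False
    entity.save()           # full save, as in the golden diff
    return entity
```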
gh_patches_debug_64705
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-3141
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lvg fails if pvs option omitted when state=absent
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
`lvg` module
##### ANSIBLE VERSION
```
ansible 2.1.2.0
config file = /Users/jsok/workspace/ansible.cfg
configured module search path = Default w/o overrides
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
CentOS 6.7
##### SUMMARY
The `pvs` option is not necessary when `state=absent`, however failing to supply an empty string will cause the module to fail.
##### STEPS TO REPRODUCE
```
---
- name: Remove a volume group
hosts: localhost
tasks:
- name: Remove vg01
lvg:
vg: vg01
state: absent
```
##### EXPECTED RESULTS
The volume group is removed successfully.
##### ACTUAL RESULTS
```
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "module_stderr": "", "module_stdout": "Traceback (most recent call last):\r\n File \"/tmp/ansible_tj_9JM/ansible_module_lvg.py\", line 255, in <module>\r\n main()\r\n File \"/tmp/ansible_tj_9JM/ansible_module_lvg.py\", line 140, in main\r\n for idx, dev in enumerate(dev_list):\r\nUnboundLocalError: local variable 'dev_list' referenced before assignment\r\n", "msg": "MODULE FAILURE"}
```
</issue>
<code>
[start of system/lvg.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2013, Alexander Bulimov <[email protected]>
5 # based on lvol module by Jeroen Hoekx <[email protected]>
6 #
7 # This file is part of Ansible
8 #
9 # Ansible is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # Ansible is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
21
22 DOCUMENTATION = '''
23 ---
24 author: "Alexander Bulimov (@abulimov)"
25 module: lvg
26 short_description: Configure LVM volume groups
27 description:
28 - This module creates, removes or resizes volume groups.
29 version_added: "1.1"
30 options:
31 vg:
32 description:
33 - The name of the volume group.
34 required: true
35 pvs:
36 description:
37 - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.
38 - The module will take care of running pvcreate if needed.
39 required: false
40 pesize:
41 description:
42 - The size of the physical extent in megabytes. Must be a power of 2.
43 default: 4
44 required: false
45 vg_options:
46 description:
47 - Additional options to pass to C(vgcreate) when creating the volume group.
48 default: null
49 required: false
50 version_added: "1.6"
51 state:
52 choices: [ "present", "absent" ]
53 default: present
54 description:
55 - Control if the volume group exists.
56 required: false
57 force:
58 choices: [ "yes", "no" ]
59 default: "no"
60 description:
61 - If yes, allows to remove volume group with logical volumes.
62 required: false
63 notes:
64 - module does not modify PE size for already present volume group
65 '''
66
67 EXAMPLES = '''
68 # Create a volume group on top of /dev/sda1 with physical extent size = 32MB.
69 - lvg: vg=vg.services pvs=/dev/sda1 pesize=32
70
71 # Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
72 # If, for example, we already have VG vg.services on top of /dev/sdb1,
73 # this VG will be extended by /dev/sdc5. Or if vg.services was created on
74 # top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
75 # and then reduce by /dev/sda5.
76 - lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5
77
78 # Remove a volume group with name vg.services.
79 - lvg: vg=vg.services state=absent
80 '''
81
82 def parse_vgs(data):
83 vgs = []
84 for line in data.splitlines():
85 parts = line.strip().split(';')
86 vgs.append({
87 'name': parts[0],
88 'pv_count': int(parts[1]),
89 'lv_count': int(parts[2]),
90 })
91 return vgs
92
93 def find_mapper_device_name(module, dm_device):
94 dmsetup_cmd = module.get_bin_path('dmsetup', True)
95 mapper_prefix = '/dev/mapper/'
96 rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
97 if rc != 0:
98 module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
99 mapper_device = mapper_prefix + dm_name.rstrip()
100 return mapper_device
101
102 def parse_pvs(module, data):
103 pvs = []
104 dm_prefix = '/dev/dm-'
105 for line in data.splitlines():
106 parts = line.strip().split(';')
107 if parts[0].startswith(dm_prefix):
108 parts[0] = find_mapper_device_name(module, parts[0])
109 pvs.append({
110 'name': parts[0],
111 'vg_name': parts[1],
112 })
113 return pvs
114
115 def main():
116 module = AnsibleModule(
117 argument_spec = dict(
118 vg=dict(required=True),
119 pvs=dict(type='list'),
120 pesize=dict(type='int', default=4),
121 vg_options=dict(default=''),
122 state=dict(choices=["absent", "present"], default='present'),
123 force=dict(type='bool', default='no'),
124 ),
125 supports_check_mode=True,
126 )
127
128 vg = module.params['vg']
129 state = module.params['state']
130 force = module.boolean(module.params['force'])
131 pesize = module.params['pesize']
132 vgoptions = module.params['vg_options'].split()
133
134 if module.params['pvs']:
135 dev_list = module.params['pvs']
136 elif state == 'present':
137 module.fail_json(msg="No physical volumes given.")
138
139 # LVM always uses real paths not symlinks so replace symlinks with actual path
140 for idx, dev in enumerate(dev_list):
141 dev_list[idx] = os.path.realpath(dev)
142
143 if state=='present':
144 ### check given devices
145 for test_dev in dev_list:
146 if not os.path.exists(test_dev):
147 module.fail_json(msg="Device %s not found."%test_dev)
148
149 ### get pv list
150 pvs_cmd = module.get_bin_path('pvs', True)
151 rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd)
152 if rc != 0:
153 module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err)
154
155 ### check pv for devices
156 pvs = parse_pvs(module, current_pvs)
157 used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ]
158 if used_pvs:
159 module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name']))
160
161 vgs_cmd = module.get_bin_path('vgs', True)
162 rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
163
164 if rc != 0:
165 module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err)
166
167 changed = False
168
169 vgs = parse_vgs(current_vgs)
170
171 for test_vg in vgs:
172 if test_vg['name'] == vg:
173 this_vg = test_vg
174 break
175 else:
176 this_vg = None
177
178 if this_vg is None:
179 if state == 'present':
180 ### create VG
181 if module.check_mode:
182 changed = True
183 else:
184 ### create PV
185 pvcreate_cmd = module.get_bin_path('pvcreate', True)
186 for current_dev in dev_list:
187 rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd,current_dev))
188 if rc == 0:
189 changed = True
190 else:
191 module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
192 vgcreate_cmd = module.get_bin_path('vgcreate')
193 rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
194 if rc == 0:
195 changed = True
196 else:
197 module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err)
198 else:
199 if state == 'absent':
200 if module.check_mode:
201 module.exit_json(changed=True)
202 else:
203 if this_vg['lv_count'] == 0 or force:
204 ### remove VG
205 vgremove_cmd = module.get_bin_path('vgremove', True)
206 rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
207 if rc == 0:
208 module.exit_json(changed=True)
209 else:
210 module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err)
211 else:
212 module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg))
213
214 ### resize VG
215 current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]
216 devs_to_remove = list(set(current_devs) - set(dev_list))
217 devs_to_add = list(set(dev_list) - set(current_devs))
218
219 if devs_to_add or devs_to_remove:
220 if module.check_mode:
221 changed = True
222 else:
223 if devs_to_add:
224 devs_to_add_string = ' '.join(devs_to_add)
225 ### create PV
226 pvcreate_cmd = module.get_bin_path('pvcreate', True)
227 for current_dev in devs_to_add:
228 rc,_,err = module.run_command("%s -f %s" % (pvcreate_cmd, current_dev))
229 if rc == 0:
230 changed = True
231 else:
232 module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err)
233 ### add PV to our VG
234 vgextend_cmd = module.get_bin_path('vgextend', True)
235 rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
236 if rc == 0:
237 changed = True
238 else:
239 module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err)
240
241 ### remove some PV from our VG
242 if devs_to_remove:
243 devs_to_remove_string = ' '.join(devs_to_remove)
244 vgreduce_cmd = module.get_bin_path('vgreduce', True)
245 rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
246 if rc == 0:
247 changed = True
248 else:
249 module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err)
250
251 module.exit_json(changed=changed)
252
253 # import module snippets
254 from ansible.module_utils.basic import *
255 main()
256
[end of system/lvg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/system/lvg.py b/system/lvg.py
--- a/system/lvg.py
+++ b/system/lvg.py
@@ -131,6 +131,7 @@
pesize = module.params['pesize']
vgoptions = module.params['vg_options'].split()
+ dev_list = []
if module.params['pvs']:
dev_list = module.params['pvs']
elif state == 'present':
|
{"golden_diff": "diff --git a/system/lvg.py b/system/lvg.py\n--- a/system/lvg.py\n+++ b/system/lvg.py\n@@ -131,6 +131,7 @@\n pesize = module.params['pesize']\n vgoptions = module.params['vg_options'].split()\n \n+ dev_list = []\n if module.params['pvs']:\n dev_list = module.params['pvs']\n elif state == 'present':\n", "issue": "lvg fails if pvs option omitted when state=absent\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\n`lvg` module\n##### ANSIBLE VERSION\n\n```\nansible 2.1.2.0\n config file = /Users/jsok/workspace/ansible.cfg\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nCentOS 6.7\n##### SUMMARY\n\nThe `pvs` option is not necessary when `state=absent`, however failing to supply an empty string will cause the module to fail.\n##### STEPS TO REPRODUCE\n\n```\n\n---\n- name: Remove a volume group\n hosts: localhost\n tasks:\n - name: Remove vg01\n lvg:\n vg: vg01\n state: absent\n```\n##### EXPECTED RESULTS\n\nThe volume group is removed successfully.\n##### ACTUAL RESULTS\n\n```\nfatal: [localhost]: FAILED! => {\"changed\": false, \"failed\": true, \"module_stderr\": \"\", \"module_stdout\": \"Traceback (most recent call last):\\r\\n File \\\"/tmp/ansible_tj_9JM/ansible_module_lvg.py\\\", line 255, in <module>\\r\\n main()\\r\\n File \\\"/tmp/ansible_tj_9JM/ansible_module_lvg.py\\\", line 140, in main\\r\\n for idx, dev in enumerate(dev_list):\\r\\nUnboundLocalError: local variable 'dev_list' referenced before assignment\\r\\n\", \"msg\": \"MODULE FAILURE\"}\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Alexander Bulimov <[email protected]>\n# based on lvol module by Jeroen Hoekx <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nauthor: \"Alexander Bulimov (@abulimov)\"\nmodule: lvg\nshort_description: Configure LVM volume groups\ndescription:\n - This module creates, removes or resizes volume groups.\nversion_added: \"1.1\"\noptions:\n vg:\n description:\n - The name of the volume group.\n required: true\n pvs:\n description:\n - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group.\n - The module will take care of running pvcreate if needed. \n required: false\n pesize:\n description:\n - The size of the physical extent in megabytes. 
Must be a power of 2.\n default: 4\n required: false\n vg_options:\n description:\n - Additional options to pass to C(vgcreate) when creating the volume group.\n default: null\n required: false\n version_added: \"1.6\"\n state:\n choices: [ \"present\", \"absent\" ]\n default: present\n description:\n - Control if the volume group exists.\n required: false\n force:\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n description:\n - If yes, allows to remove volume group with logical volumes.\n required: false\nnotes:\n - module does not modify PE size for already present volume group\n'''\n\nEXAMPLES = '''\n# Create a volume group on top of /dev/sda1 with physical extent size = 32MB.\n- lvg: vg=vg.services pvs=/dev/sda1 pesize=32\n\n# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.\n# If, for example, we already have VG vg.services on top of /dev/sdb1,\n# this VG will be extended by /dev/sdc5. Or if vg.services was created on\n# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,\n# and then reduce by /dev/sda5.\n- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5\n\n# Remove a volume group with name vg.services.\n- lvg: vg=vg.services state=absent\n'''\n\ndef parse_vgs(data):\n vgs = []\n for line in data.splitlines():\n parts = line.strip().split(';')\n vgs.append({\n 'name': parts[0],\n 'pv_count': int(parts[1]),\n 'lv_count': int(parts[2]),\n })\n return vgs\n\ndef find_mapper_device_name(module, dm_device):\n dmsetup_cmd = module.get_bin_path('dmsetup', True)\n mapper_prefix = '/dev/mapper/'\n rc, dm_name, err = module.run_command(\"%s info -C --noheadings -o name %s\" % (dmsetup_cmd, dm_device))\n if rc != 0:\n module.fail_json(msg=\"Failed executing dmsetup command.\", rc=rc, err=err)\n mapper_device = mapper_prefix + dm_name.rstrip()\n return mapper_device\n\ndef parse_pvs(module, data):\n pvs = []\n dm_prefix = '/dev/dm-'\n for line in data.splitlines():\n parts = line.strip().split(';')\n if parts[0].startswith(dm_prefix):\n parts[0] = find_mapper_device_name(module, parts[0])\n pvs.append({\n 'name': parts[0],\n 'vg_name': parts[1],\n })\n return pvs\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n vg=dict(required=True),\n pvs=dict(type='list'),\n pesize=dict(type='int', default=4),\n vg_options=dict(default=''),\n state=dict(choices=[\"absent\", \"present\"], default='present'),\n force=dict(type='bool', default='no'),\n ),\n supports_check_mode=True,\n )\n\n vg = module.params['vg']\n state = module.params['state']\n force = module.boolean(module.params['force'])\n pesize = module.params['pesize']\n vgoptions = module.params['vg_options'].split()\n\n if module.params['pvs']:\n dev_list = module.params['pvs']\n elif state == 'present':\n module.fail_json(msg=\"No physical volumes given.\")\n\n # LVM always uses real paths not symlinks so replace symlinks with actual path\n for idx, dev in enumerate(dev_list):\n dev_list[idx] = os.path.realpath(dev)\n\n if state=='present':\n ### check given devices\n for test_dev in dev_list:\n if not os.path.exists(test_dev):\n module.fail_json(msg=\"Device %s not found.\"%test_dev)\n\n ### get pv list\n pvs_cmd = module.get_bin_path('pvs', True)\n rc,current_pvs,err = module.run_command(\"%s --noheadings -o pv_name,vg_name --separator ';'\" % pvs_cmd)\n if rc != 0:\n module.fail_json(msg=\"Failed executing pvs command.\",rc=rc, err=err)\n\n ### check pv for devices\n pvs = parse_pvs(module, current_pvs)\n used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] 
!= vg ]\n if used_pvs:\n module.fail_json(msg=\"Device %s is already in %s volume group.\"%(used_pvs[0]['name'],used_pvs[0]['vg_name']))\n\n vgs_cmd = module.get_bin_path('vgs', True)\n rc,current_vgs,err = module.run_command(\"%s --noheadings -o vg_name,pv_count,lv_count --separator ';'\" % vgs_cmd)\n\n if rc != 0:\n module.fail_json(msg=\"Failed executing vgs command.\",rc=rc, err=err)\n\n changed = False\n\n vgs = parse_vgs(current_vgs)\n\n for test_vg in vgs:\n if test_vg['name'] == vg:\n this_vg = test_vg\n break\n else:\n this_vg = None\n\n if this_vg is None:\n if state == 'present':\n ### create VG\n if module.check_mode:\n changed = True\n else:\n ### create PV\n pvcreate_cmd = module.get_bin_path('pvcreate', True)\n for current_dev in dev_list:\n rc,_,err = module.run_command(\"%s -f %s\" % (pvcreate_cmd,current_dev))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating physical volume '%s' failed\" % current_dev, rc=rc, err=err)\n vgcreate_cmd = module.get_bin_path('vgcreate')\n rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating volume group '%s' failed\"%vg, rc=rc, err=err)\n else:\n if state == 'absent':\n if module.check_mode:\n module.exit_json(changed=True)\n else:\n if this_vg['lv_count'] == 0 or force:\n ### remove VG\n vgremove_cmd = module.get_bin_path('vgremove', True)\n rc,_,err = module.run_command(\"%s --force %s\" % (vgremove_cmd, vg))\n if rc == 0:\n module.exit_json(changed=True)\n else:\n module.fail_json(msg=\"Failed to remove volume group %s\"%(vg),rc=rc, err=err)\n else:\n module.fail_json(msg=\"Refuse to remove non-empty volume group %s without force=yes\"%(vg))\n\n ### resize VG\n current_devs = [ os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg ]\n devs_to_remove = list(set(current_devs) - set(dev_list))\n devs_to_add = list(set(dev_list) - set(current_devs))\n\n if devs_to_add or devs_to_remove:\n if module.check_mode:\n changed = True\n else:\n if devs_to_add:\n devs_to_add_string = ' '.join(devs_to_add)\n ### create PV\n pvcreate_cmd = module.get_bin_path('pvcreate', True)\n for current_dev in devs_to_add:\n rc,_,err = module.run_command(\"%s -f %s\" % (pvcreate_cmd, current_dev))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Creating physical volume '%s' failed\"%current_dev, rc=rc, err=err)\n ### add PV to our VG\n vgextend_cmd = module.get_bin_path('vgextend', True)\n rc,_,err = module.run_command(\"%s %s %s\" % (vgextend_cmd, vg, devs_to_add_string))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Unable to extend %s by %s.\"%(vg, devs_to_add_string),rc=rc,err=err)\n\n ### remove some PV from our VG\n if devs_to_remove:\n devs_to_remove_string = ' '.join(devs_to_remove)\n vgreduce_cmd = module.get_bin_path('vgreduce', True)\n rc,_,err = module.run_command(\"%s --force %s %s\" % (vgreduce_cmd, vg, devs_to_remove_string))\n if rc == 0:\n changed = True\n else:\n module.fail_json(msg=\"Unable to reduce %s by %s.\"%(vg, devs_to_remove_string),rc=rc,err=err)\n\n module.exit_json(changed=changed)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "system/lvg.py"}]}
| 3,929 | 96 |
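The failure in `gh_patches_debug_64705` is a plain `UnboundLocalError`: `dev_list` is bound only inside one branch of the conditional but read unconditionally afterwards, and the golden diff fixes it by initialising the list up front. A self-contained sketch of the bug shape and the one-line style of fix follows; `resolve_devices` is an illustrative stand-in for the module's argument handling, not Ansible code.

```python
import os


def resolve_devices(pvs, state):
    dev_list = []  # the golden diff's fix: bind dev_list before the branches
    if pvs:
        dev_list = pvs
    elif state == "present":
        raise ValueError("No physical volumes given.")
    # Without the binding above, this line raised UnboundLocalError whenever
    # state == "absent" and pvs was omitted.
    return [os.path.realpath(dev) for dev in dev_list]
```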
gh_patches_debug_18537
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-1306
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove usage of `distutils` which has been deprecated
It turns out that `distutils` has been deprecated and will be removed in Python 3.12. We should stop using it prior to our 0.7 release. Fortunately I only found one usage in: `plasmapy/plasma/sources/openpmd_hdf5.py`.
</issue>
<code>
[start of plasmapy/plasma/sources/openpmd_hdf5.py]
1 __all__ = ["HDF5Reader"]
2
3 import astropy.units as u
4 import numpy as np
5 import os
6
7 from distutils.version import StrictVersion
8
9 from plasmapy.plasma.exceptions import DataStandardError
10 from plasmapy.plasma.plasma_base import GenericPlasma
11
12 _OUTDATED_VERSION = "1.1.0"
13 _NEWER_VERSION = "2.0.0"
14
15 # This is the order what OpenPMD uses to store unit
16 # dimensions for a record.
17 _UNITS = (u.meter, u.kilogram, u.second, u.ampere, u.Kelvin, u.mol, u.candela)
18
19
20 def _fetch_units(openPMD_dims):
21 """Converts a collection of OpenPMD dimensions to astropy.units."""
22
23 units = u.dimensionless_unscaled
24 for factor, unit in zip(openPMD_dims, _UNITS):
25 units *= unit ** factor
26 units, *_ = units.compose()
27 return units
28
29
30 def _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):
31 """Checks if the passed version is supported or not."""
32
33 parsed_version = StrictVersion(openPMD_version)
34 outdated_version = StrictVersion(outdated)
35 newer_version = StrictVersion(newer)
36 return outdated_version <= parsed_version < newer_version
37
38
39 class HDF5Reader(GenericPlasma):
40 """
41 .. _OpenPMD: https://www.openpmd.org/
42
43 Core class for accessing various attributes on HDF5 files that
44 are based on OpenPMD_ standards.
45
46 Parameters
47 ----------
48 hdf5 : `str`
49 Path to HDF5 file.
50
51 **kwargs
52 Any keyword accepted by `GenericPlasma`.
53
54 """
55
56 def __init__(self, hdf5, **kwargs):
57 super().__init__(**kwargs)
58
59 if not os.path.isfile(hdf5):
60 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
61 try:
62 import h5py
63 except (ImportError, ModuleNotFoundError) as e:
64 from plasmapy.optional_deps import h5py_import_error
65
66 raise ImportError(h5py_import_error) from e
67
68 h5 = h5py.File(hdf5, "r")
69 self.h5 = h5
70
71 self._check_valid_openpmd_version()
72
73 self.subname = tuple(self.h5["data"])[0]
74
75 def __enter__(self):
76 return self.h5
77
78 def close(self):
79 self.h5.close()
80
81 def __exit__(self):
82 self.h5.close()
83
84 def _check_valid_openpmd_version(self):
85 try:
86 openPMD_version = self.h5.attrs["openPMD"].decode("utf-8")
87 if _valid_version(openPMD_version):
88 return True
89 else:
90 raise DataStandardError(
91 f"We currently only support HDF5 versions"
92 f"starting from v{_OUTDATED_VERSION} and "
93 f"lower than v{_NEWER_VERSION}. You can "
94 f"however convert your HDF5 to a supported "
95 f"version. For more information; see "
96 f"https://github.com/openPMD/openPMD-updater"
97 )
98 except KeyError:
99 raise DataStandardError(
100 "Input HDF5 file does not go on with standards defined by OpenPMD"
101 )
102
103 @property
104 def electric_field(self):
105 """
106 An (x, y, z) array containing electric field data. (Returned as an astropy
107 `~astropy.units.Quantity`.)
108 """
109 path = f"data/{self.subname}/fields/E"
110 if path in self.h5:
111 units = _fetch_units(self.h5[path].attrs["unitDimension"])
112 axes = [self.h5[path][axis] for axis in self.h5[path]]
113 return np.array(axes) * units
114 else:
115 raise AttributeError("No electric field data available in HDF5 file")
116
117 @property
118 def charge_density(self):
119 """
120 An array containing charge density data. (Returned as an astropy
121 `~astropy.units.Quantity`.)
122 """
123 path = f"data/{self.subname}/fields/rho"
124 if path in self.h5:
125 units = _fetch_units(self.h5[path].attrs["unitDimension"])
126 return np.array(self.h5[path]) * units
127 else:
128 raise AttributeError("No charge density data available in HDF5 file")
129
130 @property
131 def magnetic_field(self):
132 path = f"data/{self.subname}/fields/B"
133 if path in self.h5:
134 units = _fetch_units(self.h5[path].attrs["unitDimension"])
135 axes = [self.h5[path][axis] for axis in self.h5[path]]
136 return np.array(axes) * units
137 else:
138 raise AttributeError("No magnetic field data available in HDF5 file")
139
140 @property
141 def electric_current(self):
142 path = f"data/{self.subname}/fields/J"
143 if path in self.h5:
144 units = _fetch_units(self.h5[path].attrs["unitDimension"])
145 axes = [self.h5[path][axis] for axis in self.h5[path]]
146 return np.array(axes) * units
147 else:
148 raise AttributeError("No electric current data available in HDF5 file")
149
150 @classmethod
151 def is_datasource_for(cls, **kwargs):
152 if "hdf5" not in kwargs:
153 return False
154
155 hdf5 = kwargs.get("hdf5")
156 openPMD = kwargs.get("openPMD")
157
158 isfile = os.path.isfile(hdf5)
159 if not isfile:
160 raise FileNotFoundError(f"Could not find file: '{hdf5}'")
161
162 if "openPMD" not in kwargs:
163 try:
164 import h5py
165 except (ImportError, ModuleNotFoundError) as e:
166 from plasmapy.optional_deps import h5py_import_error
167
168 raise ImportError(h5py_import_error) from e
169
170 h5 = h5py.File(hdf5, "r")
171 try:
172 openPMD = h5.attrs["openPMD"]
173 except KeyError:
174 openPMD = False
175
176 return openPMD
177
[end of plasmapy/plasma/sources/openpmd_hdf5.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plasmapy/plasma/sources/openpmd_hdf5.py b/plasmapy/plasma/sources/openpmd_hdf5.py
--- a/plasmapy/plasma/sources/openpmd_hdf5.py
+++ b/plasmapy/plasma/sources/openpmd_hdf5.py
@@ -4,7 +4,7 @@
import numpy as np
import os
-from distutils.version import StrictVersion
+from packaging.version import Version
from plasmapy.plasma.exceptions import DataStandardError
from plasmapy.plasma.plasma_base import GenericPlasma
@@ -30,9 +30,9 @@
def _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):
"""Checks if the passed version is supported or not."""
- parsed_version = StrictVersion(openPMD_version)
- outdated_version = StrictVersion(outdated)
- newer_version = StrictVersion(newer)
+ parsed_version = Version(openPMD_version)
+ outdated_version = Version(outdated)
+ newer_version = Version(newer)
return outdated_version <= parsed_version < newer_version
|
{"golden_diff": "diff --git a/plasmapy/plasma/sources/openpmd_hdf5.py b/plasmapy/plasma/sources/openpmd_hdf5.py\n--- a/plasmapy/plasma/sources/openpmd_hdf5.py\n+++ b/plasmapy/plasma/sources/openpmd_hdf5.py\n@@ -4,7 +4,7 @@\n import numpy as np\n import os\n \n-from distutils.version import StrictVersion\n+from packaging.version import Version\n \n from plasmapy.plasma.exceptions import DataStandardError\n from plasmapy.plasma.plasma_base import GenericPlasma\n@@ -30,9 +30,9 @@\n def _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"Checks if the passed version is supported or not.\"\"\"\n \n- parsed_version = StrictVersion(openPMD_version)\n- outdated_version = StrictVersion(outdated)\n- newer_version = StrictVersion(newer)\n+ parsed_version = Version(openPMD_version)\n+ outdated_version = Version(outdated)\n+ newer_version = Version(newer)\n return outdated_version <= parsed_version < newer_version\n", "issue": "Remove usage of `distutils` which has been deprecated\nIt turns out that `distutils` has been deprecated and will be removed in Python 3.12. We should stop using it prior to our 0.7 release. Fortunately I only found one usage in: `plasmapy/plasma/sources/openpmd_hdf5.py`.\n", "before_files": [{"content": "__all__ = [\"HDF5Reader\"]\n\nimport astropy.units as u\nimport numpy as np\nimport os\n\nfrom distutils.version import StrictVersion\n\nfrom plasmapy.plasma.exceptions import DataStandardError\nfrom plasmapy.plasma.plasma_base import GenericPlasma\n\n_OUTDATED_VERSION = \"1.1.0\"\n_NEWER_VERSION = \"2.0.0\"\n\n# This is the order what OpenPMD uses to store unit\n# dimensions for a record.\n_UNITS = (u.meter, u.kilogram, u.second, u.ampere, u.Kelvin, u.mol, u.candela)\n\n\ndef _fetch_units(openPMD_dims):\n \"\"\"Converts a collection of OpenPMD dimensions to astropy.units.\"\"\"\n\n units = u.dimensionless_unscaled\n for factor, unit in zip(openPMD_dims, _UNITS):\n units *= unit ** factor\n units, *_ = units.compose()\n return units\n\n\ndef _valid_version(openPMD_version, outdated=_OUTDATED_VERSION, newer=_NEWER_VERSION):\n \"\"\"Checks if the passed version is supported or not.\"\"\"\n\n parsed_version = StrictVersion(openPMD_version)\n outdated_version = StrictVersion(outdated)\n newer_version = StrictVersion(newer)\n return outdated_version <= parsed_version < newer_version\n\n\nclass HDF5Reader(GenericPlasma):\n \"\"\"\n .. 
_OpenPMD: https://www.openpmd.org/\n\n Core class for accessing various attributes on HDF5 files that\n are based on OpenPMD_ standards.\n\n Parameters\n ----------\n hdf5 : `str`\n Path to HDF5 file.\n\n **kwargs\n Any keyword accepted by `GenericPlasma`.\n\n \"\"\"\n\n def __init__(self, hdf5, **kwargs):\n super().__init__(**kwargs)\n\n if not os.path.isfile(hdf5):\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n try:\n import h5py\n except (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n\n raise ImportError(h5py_import_error) from e\n\n h5 = h5py.File(hdf5, \"r\")\n self.h5 = h5\n\n self._check_valid_openpmd_version()\n\n self.subname = tuple(self.h5[\"data\"])[0]\n\n def __enter__(self):\n return self.h5\n\n def close(self):\n self.h5.close()\n\n def __exit__(self):\n self.h5.close()\n\n def _check_valid_openpmd_version(self):\n try:\n openPMD_version = self.h5.attrs[\"openPMD\"].decode(\"utf-8\")\n if _valid_version(openPMD_version):\n return True\n else:\n raise DataStandardError(\n f\"We currently only support HDF5 versions\"\n f\"starting from v{_OUTDATED_VERSION} and \"\n f\"lower than v{_NEWER_VERSION}. You can \"\n f\"however convert your HDF5 to a supported \"\n f\"version. For more information; see \"\n f\"https://github.com/openPMD/openPMD-updater\"\n )\n except KeyError:\n raise DataStandardError(\n \"Input HDF5 file does not go on with standards defined by OpenPMD\"\n )\n\n @property\n def electric_field(self):\n \"\"\"\n An (x, y, z) array containing electric field data. (Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/E\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric field data available in HDF5 file\")\n\n @property\n def charge_density(self):\n \"\"\"\n An array containing charge density data. 
(Returned as an astropy\n `~astropy.units.Quantity`.)\n \"\"\"\n path = f\"data/{self.subname}/fields/rho\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n return np.array(self.h5[path]) * units\n else:\n raise AttributeError(\"No charge density data available in HDF5 file\")\n\n @property\n def magnetic_field(self):\n path = f\"data/{self.subname}/fields/B\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No magnetic field data available in HDF5 file\")\n\n @property\n def electric_current(self):\n path = f\"data/{self.subname}/fields/J\"\n if path in self.h5:\n units = _fetch_units(self.h5[path].attrs[\"unitDimension\"])\n axes = [self.h5[path][axis] for axis in self.h5[path]]\n return np.array(axes) * units\n else:\n raise AttributeError(\"No electric current data available in HDF5 file\")\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n if \"hdf5\" not in kwargs:\n return False\n\n hdf5 = kwargs.get(\"hdf5\")\n openPMD = kwargs.get(\"openPMD\")\n\n isfile = os.path.isfile(hdf5)\n if not isfile:\n raise FileNotFoundError(f\"Could not find file: '{hdf5}'\")\n\n if \"openPMD\" not in kwargs:\n try:\n import h5py\n except (ImportError, ModuleNotFoundError) as e:\n from plasmapy.optional_deps import h5py_import_error\n\n raise ImportError(h5py_import_error) from e\n\n h5 = h5py.File(hdf5, \"r\")\n try:\n openPMD = h5.attrs[\"openPMD\"]\n except KeyError:\n openPMD = False\n\n return openPMD\n", "path": "plasmapy/plasma/sources/openpmd_hdf5.py"}]}
| 2,415 | 247 |
gh_patches_debug_20301
|
rasdani/github-patches
|
git_diff
|
UTNkar__moore-134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing search template
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
### Prerequisites
* [X] Put an X between the brackets on this line if you have done all of the
following:
* Reproduced the problem with clear cache.
    * (If running the application locally:) Made sure you're running the newest version on the development branch
* Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues
### Description
The current search is not really working. Although you can search, the results are ugly and very spotty.
Goal: Evaluate the searchable fields within Project Moore and make a search template.
### Steps to Reproduce
1. Visit the home page
2. Search something
3. Be disappointed ☹️
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
</issue>
<code>
[start of website/materialize/templatetags/materialize.py]
1 from django import template
2 from django.template import loader
3
4 register = template.Library()
5
6
7 def get_widget_name(field):
8 return field.field.widget.__class__.__name__
9
10
11 def append_classes(field):
12 field.field.label_suffix = ''
13 classes = field.field.widget.attrs.get('class', '')
14 classes += ' validate'
15 if field.errors:
16 classes += ' invalid'
17 field.field.widget.attrs['class'] = classes
18
19
20 def render_field(template, field, prefix=None):
21 t = loader.get_template(template)
22 c = {
23 'field': field,
24 'prefix': prefix,
25 }
26 html = t.render(c)
27 return html
28
29
30 @register.simple_tag
31 def materialize_field(field, prefix=None):
32 widget = get_widget_name(field)
33 if widget in ['TextInput', 'EmailInput', 'PasswordInput', 'Select',
34 'Textarea']:
35 append_classes(field)
36 return render_field('materialize/form/input.html', field, prefix)
37 else:
38 return field.as_widget()
39
[end of website/materialize/templatetags/materialize.py]
[start of website/search/views.py]
1 from __future__ import absolute_import, unicode_literals
2
3 from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
4 from django.shortcuts import render
5
6 from wagtail.wagtailcore.models import Page
7 from wagtail.wagtailsearch.models import Query
8
9
10 def search(request):
11 search_query = request.GET.get('query', None)
12 page = request.GET.get('page', 1)
13
14 # Search
15 if search_query:
16 search_results = Page.objects.live().search(search_query)
17 query = Query.get(search_query)
18
19 # Record hit
20 query.add_hit()
21 else:
22 search_results = Page.objects.none()
23
24 # Pagination
25 paginator = Paginator(search_results, 10)
26 try:
27 search_results = paginator.page(page)
28 except PageNotAnInteger:
29 search_results = paginator.page(1)
30 except EmptyPage:
31 search_results = paginator.page(paginator.num_pages)
32
33 return render(request, 'search/search.html', {
34 'search_query': search_query,
35 'search_results': search_results,
36 })
37
[end of website/search/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/materialize/templatetags/materialize.py b/website/materialize/templatetags/materialize.py
--- a/website/materialize/templatetags/materialize.py
+++ b/website/materialize/templatetags/materialize.py
@@ -23,8 +23,7 @@
'field': field,
'prefix': prefix,
}
- html = t.render(c)
- return html
+ return t.render(c)
@register.simple_tag
@@ -36,3 +35,12 @@
return render_field('materialize/form/input.html', field, prefix)
else:
return field.as_widget()
+
+
[email protected]_tag('materialize/pagination.html')
+def materialize_pagination(page, url):
+ return {
+ 'page': page,
+ 'paginator': page.paginator,
+ 'url': url,
+ }
diff --git a/website/search/views.py b/website/search/views.py
--- a/website/search/views.py
+++ b/website/search/views.py
@@ -22,7 +22,7 @@
search_results = Page.objects.none()
# Pagination
- paginator = Paginator(search_results, 10)
+ paginator = Paginator(search_results, 25)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
|
{"golden_diff": "diff --git a/website/materialize/templatetags/materialize.py b/website/materialize/templatetags/materialize.py\n--- a/website/materialize/templatetags/materialize.py\n+++ b/website/materialize/templatetags/materialize.py\n@@ -23,8 +23,7 @@\n 'field': field,\n 'prefix': prefix,\n }\n- html = t.render(c)\n- return html\n+ return t.render(c)\n \n \n @register.simple_tag\n@@ -36,3 +35,12 @@\n return render_field('materialize/form/input.html', field, prefix)\n else:\n return field.as_widget()\n+\n+\[email protected]_tag('materialize/pagination.html')\n+def materialize_pagination(page, url):\n+ return {\n+ 'page': page,\n+ 'paginator': page.paginator,\n+ 'url': url,\n+ }\ndiff --git a/website/search/views.py b/website/search/views.py\n--- a/website/search/views.py\n+++ b/website/search/views.py\n@@ -22,7 +22,7 @@\n search_results = Page.objects.none()\n \n # Pagination\n- paginator = Paginator(search_results, 10)\n+ paginator = Paginator(search_results, 25)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n", "issue": "Missing search template\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\n### Prerequisites\r\n\r\n* [X] Put an X between the brackets on this line if you have done all of the\r\nfollowing:\r\n * Reproduced the problem with clear cache.\r\n * (If running the application locally:) Made sure your running the newest version on the development branch\r\n * Checked that your issue isn't already filed: https://github.com/UTNkar/moore/issues\r\n\r\n### Description\r\n\r\nThe current search is now really working. Although you can search, the results are ugly and very spotty. \r\n\r\nGoal: Evaluate the searchable fields within Project Moore and make a search template.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Visit the home page\r\n2. Search something\r\n3. 
Be disappointed \u2639\ufe0f \r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\n", "before_files": [{"content": "from django import template\nfrom django.template import loader\n\nregister = template.Library()\n\n\ndef get_widget_name(field):\n return field.field.widget.__class__.__name__\n\n\ndef append_classes(field):\n field.field.label_suffix = ''\n classes = field.field.widget.attrs.get('class', '')\n classes += ' validate'\n if field.errors:\n classes += ' invalid'\n field.field.widget.attrs['class'] = classes\n\n\ndef render_field(template, field, prefix=None):\n t = loader.get_template(template)\n c = {\n 'field': field,\n 'prefix': prefix,\n }\n html = t.render(c)\n return html\n\n\[email protected]_tag\ndef materialize_field(field, prefix=None):\n widget = get_widget_name(field)\n if widget in ['TextInput', 'EmailInput', 'PasswordInput', 'Select',\n 'Textarea']:\n append_classes(field)\n return render_field('materialize/form/input.html', field, prefix)\n else:\n return field.as_widget()\n", "path": "website/materialize/templatetags/materialize.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.shortcuts import render\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch.models import Query\n\n\ndef search(request):\n search_query = request.GET.get('query', None)\n page = request.GET.get('page', 1)\n\n # Search\n if search_query:\n search_results = Page.objects.live().search(search_query)\n query = Query.get(search_query)\n\n # Record hit\n query.add_hit()\n else:\n search_results = Page.objects.none()\n\n # Pagination\n paginator = Paginator(search_results, 10)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n })\n", "path": "website/search/views.py"}]}
| 1,335 | 301 |
gh_patches_debug_12092
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-86
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError in staged_files_only
```
$ pre-commit
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to .../.pre-commit-files/patch1397853050.
Traceback (most recent call last):
File ".../bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File ".../lib/python2.6/site-packages/pre_commit/util.py", line 52, in wrapper
return func(argv)
File ".../lib/python2.6/site-packages/pre_commit/run.py", line 59, in run
return commands.run(runner, args)
File ".../lib/python2.6/site-packages/pre_commit/commands.py", line 254, in run
with ctx:
File "/usr/lib64/python2.6/contextlib.py", line 16, in __enter__
return self.gen.next()
File ".../lib/python2.6/site-packages/pre_commit/staged_files_only.py", line 32, in staged_files_only
patch_file.write(diff_stdout)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 3795: ordinal not in range(128)
```
</issue>
<code>
[start of pre_commit/staged_files_only.py]
1 import contextlib
2 import logging
3 import time
4
5 from pre_commit.prefixed_command_runner import CalledProcessError
6
7
8 logger = logging.getLogger('pre_commit')
9
10
11 @contextlib.contextmanager
12 def staged_files_only(cmd_runner):
13 """Clear any unstaged changes from the git working directory inside this
14 context.
15
16 Args:
17 cmd_runner - PrefixedCommandRunner
18 """
19 # Determine if there are unstaged files
20 retcode, diff_stdout, _ = cmd_runner.run(
21 ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],
22 retcode=None,
23 )
24 if retcode and diff_stdout.strip():
25 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
26 logger.warning('Unstaged files detected.')
27 logger.info(
28 'Stashing unstaged files to {0}.'.format(patch_filename),
29 )
30 # Save the current unstaged changes as a patch
31 with open(patch_filename, 'w') as patch_file:
32 patch_file.write(diff_stdout)
33
34 # Clear the working directory of unstaged changes
35 cmd_runner.run(['git', 'checkout', '--', '.'])
36 try:
37 yield
38 finally:
39 # Try to apply the patch we saved
40 try:
41 cmd_runner.run(['git', 'apply', patch_filename])
42 except CalledProcessError:
43 logger.warning(
44 'Stashed changes conflicted with hook auto-fixes... '
45 'Rolling back fixes...'
46 )
47 # We failed to apply the patch, presumably due to fixes made
48 # by hooks.
49 # Roll back the changes made by hooks.
50 cmd_runner.run(['git', 'checkout', '--', '.'])
51 cmd_runner.run(['git', 'apply', patch_filename])
52 logger.info('Restored changes from {0}.'.format(patch_filename))
53 else:
54 # There weren't any staged files so we don't need to do anything
55 # special
56 yield
57
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -1,4 +1,5 @@
import contextlib
+import io
import logging
import time
@@ -28,7 +29,7 @@
'Stashing unstaged files to {0}.'.format(patch_filename),
)
# Save the current unstaged changes as a patch
- with open(patch_filename, 'w') as patch_file:
+ with io.open(patch_filename, 'w', encoding='utf-8') as patch_file:
patch_file.write(diff_stdout)
# Clear the working directory of unstaged changes
|
{"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -1,4 +1,5 @@\n import contextlib\n+import io\n import logging\n import time\n \n@@ -28,7 +29,7 @@\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n- with open(patch_filename, 'w') as patch_file:\n+ with io.open(patch_filename, 'w', encoding='utf-8') as patch_file:\n patch_file.write(diff_stdout)\n \n # Clear the working directory of unstaged changes\n", "issue": "UnicodeDecodeError in staged_files_only\n```\n$ pre-commit\n[WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to .../.pre-commit-files/patch1397853050.\nTraceback (most recent call last):\n File \".../bin/pre-commit\", line 9, in <module>\n load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()\n File \".../lib/python2.6/site-packages/pre_commit/util.py\", line 52, in wrapper\n return func(argv)\n File \".../lib/python2.6/site-packages/pre_commit/run.py\", line 59, in run\n return commands.run(runner, args)\n File \".../lib/python2.6/site-packages/pre_commit/commands.py\", line 254, in run\n with ctx:\n File \"/usr/lib64/python2.6/contextlib.py\", line 16, in __enter__\n return self.gen.next()\n File \".../lib/python2.6/site-packages/pre_commit/staged_files_only.py\", line 32, in staged_files_only\n patch_file.write(diff_stdout)\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\xfc' in position 3795: ordinal not in range(128)\n```\n\n", "before_files": [{"content": "import contextlib\nimport logging\nimport time\n\nfrom pre_commit.prefixed_command_runner import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, diff_stdout, _ = cmd_runner.run(\n ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],\n retcode=None,\n )\n if retcode and diff_stdout.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with open(patch_filename, 'w') as patch_file:\n patch_file.write(diff_stdout)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]}
| 1,348 | 160 |
gh_patches_debug_23952
|
rasdani/github-patches
|
git_diff
|
Netflix__lemur-458
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Owner change does not modify notifications.
When a certificate changes ownership, we should also try to update the notifications by removing the old owner's default notifications and applying the new owner's defaults.
</issue>
<code>
[start of lemur/certificates/schemas.py]
1 """
2 .. module: lemur.certificates.schemas
3 :platform: unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6 .. moduleauthor:: Kevin Glisson <[email protected]>
7 """
8 from flask import current_app
9 from marshmallow import fields, validates_schema, post_load, pre_load
10 from marshmallow.exceptions import ValidationError
11
12 from lemur.schemas import AssociatedAuthoritySchema, AssociatedDestinationSchema, AssociatedCertificateSchema, \
13 AssociatedNotificationSchema, PluginInputSchema, ExtensionSchema, AssociatedRoleSchema, EndpointNestedOutputSchema
14
15 from lemur.authorities.schemas import AuthorityNestedOutputSchema
16 from lemur.destinations.schemas import DestinationNestedOutputSchema
17 from lemur.notifications.schemas import NotificationNestedOutputSchema
18 from lemur.roles.schemas import RoleNestedOutputSchema
19 from lemur.domains.schemas import DomainNestedOutputSchema
20 from lemur.users.schemas import UserNestedOutputSchema
21
22 from lemur.common.schema import LemurInputSchema, LemurOutputSchema
23 from lemur.common import validators, missing
24 from lemur.notifications import service as notification_service
25
26
27 class CertificateSchema(LemurInputSchema):
28 owner = fields.Email(required=True)
29 description = fields.String()
30
31
32 class CertificateCreationSchema(CertificateSchema):
33 @post_load
34 def default_notification(self, data):
35 if not data['notifications']:
36 notification_name = "DEFAULT_{0}".format(data['owner'].split('@')[0].upper())
37 data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])
38
39 notification_name = 'DEFAULT_SECURITY'
40 data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL'))
41 return data
42
43
44 class CertificateInputSchema(CertificateCreationSchema):
45 name = fields.String()
46 common_name = fields.String(required=True, validate=validators.sensitive_domain)
47 authority = fields.Nested(AssociatedAuthoritySchema, required=True)
48
49 validity_start = fields.DateTime()
50 validity_end = fields.DateTime()
51 validity_years = fields.Integer()
52
53 destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
54 notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
55 replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
56 roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
57
58 csr = fields.String(validate=validators.csr)
59
60 # certificate body fields
61 organizational_unit = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT'))
62 organization = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATION'))
63 location = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_LOCATION'))
64 country = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_COUNTRY'))
65 state = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_STATE'))
66
67 extensions = fields.Nested(ExtensionSchema)
68
69 @validates_schema
70 def validate_dates(self, data):
71 validators.dates(data)
72
73 @pre_load
74 def ensure_dates(self, data):
75 return missing.convert_validity_years(data)
76
77
78 class CertificateEditInputSchema(CertificateSchema):
79 notify = fields.Boolean()
80 destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
81 notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
82 replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
83 roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
84
85
86 class CertificateNestedOutputSchema(LemurOutputSchema):
87 __envelope__ = False
88 id = fields.Integer()
89 active = fields.Boolean()
90 bits = fields.Integer()
91 body = fields.String()
92 chain = fields.String()
93 description = fields.String()
94 name = fields.String()
95 cn = fields.String()
96 not_after = fields.DateTime()
97 not_before = fields.DateTime()
98 owner = fields.Email()
99 status = fields.Boolean()
100 creator = fields.Nested(UserNestedOutputSchema)
101 issuer = fields.Nested(AuthorityNestedOutputSchema)
102
103
104 class CertificateOutputSchema(LemurOutputSchema):
105 id = fields.Integer()
106 active = fields.Boolean()
107 notify = fields.Boolean()
108 bits = fields.Integer()
109 body = fields.String()
110 chain = fields.String()
111 deleted = fields.Boolean(default=False)
112 description = fields.String()
113 issuer = fields.String()
114 name = fields.String()
115 cn = fields.String()
116 not_after = fields.DateTime()
117 not_before = fields.DateTime()
118 owner = fields.Email()
119 san = fields.Boolean()
120 serial = fields.String()
121 signing_algorithm = fields.String()
122 status = fields.Boolean()
123 user = fields.Nested(UserNestedOutputSchema)
124 domains = fields.Nested(DomainNestedOutputSchema, many=True)
125 destinations = fields.Nested(DestinationNestedOutputSchema, many=True)
126 notifications = fields.Nested(NotificationNestedOutputSchema, many=True)
127 replaces = fields.Nested(CertificateNestedOutputSchema, many=True)
128 authority = fields.Nested(AuthorityNestedOutputSchema)
129 roles = fields.Nested(RoleNestedOutputSchema, many=True)
130 endpoints = fields.Nested(EndpointNestedOutputSchema, many=True, missing=[])
131
132
133 class CertificateUploadInputSchema(CertificateCreationSchema):
134 name = fields.String()
135 notify = fields.Boolean(missing=True)
136
137 private_key = fields.String(validate=validators.private_key)
138 body = fields.String(required=True, validate=validators.public_certificate)
139 chain = fields.String(validate=validators.public_certificate) # TODO this could be multiple certificates
140
141 destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
142 notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
143 replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
144 roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
145
146 @validates_schema
147 def keys(self, data):
148 if data.get('destinations'):
149 if not data.get('private_key'):
150 raise ValidationError('Destinations require private key.')
151
152
153 class CertificateExportInputSchema(LemurInputSchema):
154 plugin = fields.Nested(PluginInputSchema)
155
156
157 certificate_input_schema = CertificateInputSchema()
158 certificate_output_schema = CertificateOutputSchema()
159 certificates_output_schema = CertificateOutputSchema(many=True)
160 certificate_upload_input_schema = CertificateUploadInputSchema()
161 certificate_export_input_schema = CertificateExportInputSchema()
162 certificate_edit_input_schema = CertificateEditInputSchema()
163
[end of lemur/certificates/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lemur/certificates/schemas.py b/lemur/certificates/schemas.py
--- a/lemur/certificates/schemas.py
+++ b/lemur/certificates/schemas.py
@@ -77,11 +77,25 @@
class CertificateEditInputSchema(CertificateSchema):
notify = fields.Boolean()
+ owner = fields.String()
destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
+ @post_load
+ def enforce_notifications(self, data):
+ """
+ Ensures that when an owner changes, default notifications are added for the new owner.
+ Old owner notifications are retained unless explicitly removed.
+ :param data:
+ :return:
+ """
+ if data['owner']:
+ notification_name = "DEFAULT_{0}".format(data['owner'].split('@')[0].upper())
+ data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])
+ return data
+
class CertificateNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
|
{"golden_diff": "diff --git a/lemur/certificates/schemas.py b/lemur/certificates/schemas.py\n--- a/lemur/certificates/schemas.py\n+++ b/lemur/certificates/schemas.py\n@@ -77,11 +77,25 @@\n \n class CertificateEditInputSchema(CertificateSchema):\n notify = fields.Boolean()\n+ owner = fields.String()\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n \n+ @post_load\n+ def enforce_notifications(self, data):\n+ \"\"\"\n+ Ensures that when an owner changes, default notifications are added for the new owner.\n+ Old owner notifications are retained unless explicitly removed.\n+ :param data:\n+ :return:\n+ \"\"\"\n+ if data['owner']:\n+ notification_name = \"DEFAULT_{0}\".format(data['owner'].split('@')[0].upper())\n+ data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])\n+ return data\n+\n \n class CertificateNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n", "issue": "Owner change does not modify notifications.\nWhen a certificate changes ownership, we should try to also update the notifications by removing the old defaults owners and applying new defaults. \n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.certificates.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom flask import current_app\nfrom marshmallow import fields, validates_schema, post_load, pre_load\nfrom marshmallow.exceptions import ValidationError\n\nfrom lemur.schemas import AssociatedAuthoritySchema, AssociatedDestinationSchema, AssociatedCertificateSchema, \\\n AssociatedNotificationSchema, PluginInputSchema, ExtensionSchema, AssociatedRoleSchema, EndpointNestedOutputSchema\n\nfrom lemur.authorities.schemas import AuthorityNestedOutputSchema\nfrom lemur.destinations.schemas import DestinationNestedOutputSchema\nfrom lemur.notifications.schemas import NotificationNestedOutputSchema\nfrom lemur.roles.schemas import RoleNestedOutputSchema\nfrom lemur.domains.schemas import DomainNestedOutputSchema\nfrom lemur.users.schemas import UserNestedOutputSchema\n\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.common import validators, missing\nfrom lemur.notifications import service as notification_service\n\n\nclass CertificateSchema(LemurInputSchema):\n owner = fields.Email(required=True)\n description = fields.String()\n\n\nclass CertificateCreationSchema(CertificateSchema):\n @post_load\n def default_notification(self, data):\n if not data['notifications']:\n notification_name = \"DEFAULT_{0}\".format(data['owner'].split('@')[0].upper())\n data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])\n\n notification_name = 'DEFAULT_SECURITY'\n data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL'))\n return data\n\n\nclass CertificateInputSchema(CertificateCreationSchema):\n name = fields.String()\n common_name = fields.String(required=True, validate=validators.sensitive_domain)\n authority = 
fields.Nested(AssociatedAuthoritySchema, required=True)\n\n validity_start = fields.DateTime()\n validity_end = fields.DateTime()\n validity_years = fields.Integer()\n\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n csr = fields.String(validate=validators.csr)\n\n # certificate body fields\n organizational_unit = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT'))\n organization = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_ORGANIZATION'))\n location = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_LOCATION'))\n country = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_COUNTRY'))\n state = fields.String(missing=lambda: current_app.config.get('LEMUR_DEFAULT_STATE'))\n\n extensions = fields.Nested(ExtensionSchema)\n\n @validates_schema\n def validate_dates(self, data):\n validators.dates(data)\n\n @pre_load\n def ensure_dates(self, data):\n return missing.convert_validity_years(data)\n\n\nclass CertificateEditInputSchema(CertificateSchema):\n notify = fields.Boolean()\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n\nclass CertificateNestedOutputSchema(LemurOutputSchema):\n __envelope__ = False\n id = fields.Integer()\n active = fields.Boolean()\n bits = fields.Integer()\n body = fields.String()\n chain = fields.String()\n description = fields.String()\n name = fields.String()\n cn = fields.String()\n not_after = fields.DateTime()\n not_before = fields.DateTime()\n owner = fields.Email()\n status = fields.Boolean()\n creator = fields.Nested(UserNestedOutputSchema)\n issuer = fields.Nested(AuthorityNestedOutputSchema)\n\n\nclass CertificateOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n active = fields.Boolean()\n notify = fields.Boolean()\n bits = fields.Integer()\n body = fields.String()\n chain = fields.String()\n deleted = fields.Boolean(default=False)\n description = fields.String()\n issuer = fields.String()\n name = fields.String()\n cn = fields.String()\n not_after = fields.DateTime()\n not_before = fields.DateTime()\n owner = fields.Email()\n san = fields.Boolean()\n serial = fields.String()\n signing_algorithm = fields.String()\n status = fields.Boolean()\n user = fields.Nested(UserNestedOutputSchema)\n domains = fields.Nested(DomainNestedOutputSchema, many=True)\n destinations = fields.Nested(DestinationNestedOutputSchema, many=True)\n notifications = fields.Nested(NotificationNestedOutputSchema, many=True)\n replaces = fields.Nested(CertificateNestedOutputSchema, many=True)\n authority = fields.Nested(AuthorityNestedOutputSchema)\n roles = fields.Nested(RoleNestedOutputSchema, many=True)\n endpoints = fields.Nested(EndpointNestedOutputSchema, many=True, missing=[])\n\n\nclass CertificateUploadInputSchema(CertificateCreationSchema):\n name = fields.String()\n notify = fields.Boolean(missing=True)\n\n private_key = fields.String(validate=validators.private_key)\n body = fields.String(required=True, 
validate=validators.public_certificate)\n chain = fields.String(validate=validators.public_certificate) # TODO this could be multiple certificates\n\n destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)\n notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)\n replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)\n roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)\n\n @validates_schema\n def keys(self, data):\n if data.get('destinations'):\n if not data.get('private_key'):\n raise ValidationError('Destinations require private key.')\n\n\nclass CertificateExportInputSchema(LemurInputSchema):\n plugin = fields.Nested(PluginInputSchema)\n\n\ncertificate_input_schema = CertificateInputSchema()\ncertificate_output_schema = CertificateOutputSchema()\ncertificates_output_schema = CertificateOutputSchema(many=True)\ncertificate_upload_input_schema = CertificateUploadInputSchema()\ncertificate_export_input_schema = CertificateExportInputSchema()\ncertificate_edit_input_schema = CertificateEditInputSchema()\n", "path": "lemur/certificates/schemas.py"}]}
| 2,379 | 297 |
gh_patches_debug_13481
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-382
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the logic in the ACSHandleStage
**Version**
main
**Describe the bug**
If there are ACS that point to the same content, pick the first RA instead of the last.
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
</issue>
<code>
[start of pulpcore/plugin/repo_version_utils.py]
1 from collections import defaultdict
2 from gettext import gettext as _
3 import logging
4
5 from django.db.models import Q
6
7
8 _logger = logging.getLogger(__name__)
9
10
11 def remove_duplicates(repository_version):
12 """
13 Inspect content additions in the `RepositoryVersion` and replace repository duplicates.
14
15 Some content can have two instances A and B which are unique, but cannot both exist together in
16 one repository. For example, pulp_file's content has `relative_path` for that file within the
17 repository.
18
19 Any content newly added to the :class:`~pulpcore.plugin.models.RepositoryVersion` is checked
20 against existing content in the :class:`~pulpcore.plugin.models.RepositoryVersion` with newer
21 "repository duplicates" replace existing "repository duplicates". Each Content model can define
22 a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`
23 contain the same value for two content units, they are considered "repository duplicates".
24
25 After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the
26 :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a
27 parameter to `run()`.
28
29 Args:
30 repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked
31 and possibly modified.
32 """
33 query_for_repo_duplicates_by_type = defaultdict(lambda: Q())
34 for item in repository_version.added():
35 detail_item = item.cast()
36 if detail_item.repo_key_fields == ():
37 continue
38 unit_q_dict = {
39 field: getattr(detail_item, field) for field in detail_item.repo_key_fields
40 }
41 item_query = Q(**unit_q_dict) & ~Q(pk=detail_item.pk)
42 query_for_repo_duplicates_by_type[detail_item._meta.model] |= item_query
43
44 for model in query_for_repo_duplicates_by_type:
45 _logger.debug(_("Removing duplicates for type: {}".format(model)))
46 qs = model.objects.filter(query_for_repo_duplicates_by_type[model])
47 repository_version.remove_content(qs)
48
[end of pulpcore/plugin/repo_version_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/plugin/repo_version_utils.py b/pulpcore/plugin/repo_version_utils.py
--- a/pulpcore/plugin/repo_version_utils.py
+++ b/pulpcore/plugin/repo_version_utils.py
@@ -22,10 +22,6 @@
a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`
contain the same value for two content units, they are considered "repository duplicates".
- After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the
- :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a
- parameter to `run()`.
-
Args:
repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked
and possibly modified.
|
{"golden_diff": "diff --git a/pulpcore/plugin/repo_version_utils.py b/pulpcore/plugin/repo_version_utils.py\n--- a/pulpcore/plugin/repo_version_utils.py\n+++ b/pulpcore/plugin/repo_version_utils.py\n@@ -22,10 +22,6 @@\n a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`\n contain the same value for two content units, they are considered \"repository duplicates\".\n \n- After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the\n- :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a\n- parameter to `run()`.\n-\n Args:\n repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked\n and possibly modified.\n", "issue": "Improve the logic in the ACSHandleStage\n**Version**\r\nmain\r\n\r\n**Describe the bug**\r\nIf there are ACS that point to the same content, pick the first RA instead of last\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom gettext import gettext as _\nimport logging\n\nfrom django.db.models import Q\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef remove_duplicates(repository_version):\n \"\"\"\n Inspect content additions in the `RepositoryVersion` and replace repository duplicates.\n\n Some content can have two instances A and B which are unique, but cannot both exist together in\n one repository. For example, pulp_file's content has `relative_path` for that file within the\n repository.\n\n Any content newly added to the :class:`~pulpcore.plugin.models.RepositoryVersion` is checked\n against existing content in the :class:`~pulpcore.plugin.models.RepositoryVersion` with newer\n \"repository duplicates\" replace existing \"repository duplicates\". Each Content model can define\n a `repo_key_fields` attribute with the field names to be compared. If all `repo_key_fields`\n contain the same value for two content units, they are considered \"repository duplicates\".\n\n After instantiating `RemoveDuplicates` call it with the `run()` method and pass in the\n :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked and possibly modified as a\n parameter to `run()`.\n\n Args:\n repository_version: The :class:`~pulpcore.plugin.models.RepositoryVersion` to be checked\n and possibly modified.\n \"\"\"\n query_for_repo_duplicates_by_type = defaultdict(lambda: Q())\n for item in repository_version.added():\n detail_item = item.cast()\n if detail_item.repo_key_fields == ():\n continue\n unit_q_dict = {\n field: getattr(detail_item, field) for field in detail_item.repo_key_fields\n }\n item_query = Q(**unit_q_dict) & ~Q(pk=detail_item.pk)\n query_for_repo_duplicates_by_type[detail_item._meta.model] |= item_query\n\n for model in query_for_repo_duplicates_by_type:\n _logger.debug(_(\"Removing duplicates for type: {}\".format(model)))\n qs = model.objects.filter(query_for_repo_duplicates_by_type[model])\n repository_version.remove_content(qs)\n", "path": "pulpcore/plugin/repo_version_utils.py"}]}
| 1,163 | 183 |
gh_patches_debug_34351
|
rasdani/github-patches
|
git_diff
|
scikit-hep__awkward-2065
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ak.fill_none` doesn't support option-in-record
### Version of Awkward Array
main
### Description and code to reproduce
The following example is left unchanged by `ak.fill_none`:
```python
import awkward as ak
record = ak.zip({
'x': [1, None],
'y': [2, 3]
})
result = ak.fill_none(record, 0, axis=-1)
result.show()
```
I'm fairly sure that this is a bug, because `ak.is_none` walks successfully through records. My understanding of records is that it's only reducers that cannot peer through them.
Of course, this example would imply
```python
record = ak.zip({
"x": [1, 4],
"y": [2, 3]
}).mask[[True, False]]
assert ak.fill_none(record, 0, axis=-1).to_list() == [
{"x": 1, "y": 2},
0
]
```
I think this is reasonable; again, `is_none` behaves identically to this. Maybe we _do_ want a parameter in future, e.g. `visit_records` or some such.
</issue>
<code>
[start of src/awkward/operations/ak_fill_none.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import numbers
4
5 import awkward as ak
6
7 np = ak._nplikes.NumpyMetadata.instance()
8 cpu = ak._backends.NumpyBackend.instance()
9
10
11 def fill_none(array, value, axis=-1, *, highlevel=True, behavior=None):
12 """
13 Args:
14 array: Array-like data (anything #ak.to_layout recognizes).
15 value: Data with which to replace None.
16 axis (None or int): If None, replace all None values in the array
17 with the given value; if an int, The dimension at which this
18 operation is applied. The outermost dimension is `0`, followed
19 by `1`, etc., and negative values count backward from the
20 innermost: `-1` is the innermost dimension, `-2` is the next
21 level up, etc.
22 highlevel (bool): If True, return an #ak.Array; otherwise, return
23 a low-level #ak.contents.Content subclass.
24 behavior (None or dict): Custom #ak.behavior for the output array, if
25 high-level.
26
27 Replaces missing values (None) with a given `value`.
28
29 For example, in the following
30
31 >>> array = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
32
33 The None values could be replaced with `0` by
34
35 >>> ak.fill_none(array, 0)
36 <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>
37
38 The replacement value doesn't strictly need the same type as the
39 surrounding data. For example, the None values could also be replaced
40 by a string.
41
42 >>> ak.fill_none(array, "hi")
43 <Array [[1.1, 'hi', 2.2], [], ['hi', ...]] type='3 * var * union[float64, s...'>
44
45 The list content now has a union type:
46
47 >>> ak.fill_none(array, "hi").type.show()
48 3 * var * union[
49 float64,
50 string
51 ]
52
53 The values could be floating-point numbers or strings.
54 """
55 with ak._errors.OperationErrorContext(
56 "ak.fill_none",
57 dict(
58 array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior
59 ),
60 ):
61 return _impl(array, value, axis, highlevel, behavior)
62
63
64 def _impl(array, value, axis, highlevel, behavior):
65 arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
66 behavior = ak._util.behavior_of(array, behavior=behavior)
67 backend = ak._backends.backend_of(arraylayout, default=cpu)
68
69 # Convert value type to appropriate layout
70 if (
71 isinstance(value, np.ndarray)
72 and issubclass(value.dtype.type, (np.bool_, np.number))
73 and len(value.shape) != 0
74 ):
75 valuelayout = ak.operations.to_layout(
76 backend.nplike.asarray(value)[np.newaxis],
77 allow_record=False,
78 allow_other=False,
79 )
80 elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (
81 isinstance(value, np.ndarray)
82 and issubclass(value.dtype.type, (np.bool_, np.number))
83 ):
84 valuelayout = ak.operations.to_layout(
85 backend.nplike.asarray(value), allow_record=False, allow_other=False
86 )
87 elif (
88 ak._util.is_sized_iterable(value)
89 and not (isinstance(value, (str, bytes)))
90 or isinstance(value, (ak.highlevel.Record, ak.record.Record))
91 ):
92 valuelayout = ak.operations.to_layout(
93 value, allow_record=True, allow_other=False
94 )
95 if isinstance(valuelayout, ak.record.Record):
96 valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
97 elif len(valuelayout) == 0:
98 offsets = ak.index.Index64(
99 backend.index_nplike.array([0, 0], dtype=np.int64)
100 )
101 valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)
102 else:
103 valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)
104 else:
105 valuelayout = ak.operations.to_layout(
106 [value], allow_record=False, allow_other=False
107 )
108
109 def maybe_fillna(layout):
110 if layout.is_option:
111 return ak._do.fill_none(layout, valuelayout)
112 else:
113 return layout
114
115 if axis is None:
116
117 def action(layout, continuation, **kwargs):
118 return maybe_fillna(continuation())
119
120 else:
121
122 def action(layout, depth, **kwargs):
123 posaxis = ak._util.maybe_posaxis(layout, axis, depth)
124 if posaxis is not None and posaxis + 1 < depth:
125 return layout
126 elif posaxis is not None and posaxis + 1 == depth:
127 return maybe_fillna(layout)
128 elif layout.is_leaf:
129 raise ak._errors.wrap_error(
130 np.AxisError(
131 f"axis={axis} exceeds the depth of this array ({depth})"
132 )
133 )
134
135 out = ak._do.recursively_apply(arraylayout, action, behavior)
136
137 return ak._util.wrap(out, behavior, highlevel)
138
[end of src/awkward/operations/ak_fill_none.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py
--- a/src/awkward/operations/ak_fill_none.py
+++ b/src/awkward/operations/ak_fill_none.py
@@ -63,7 +63,7 @@
def _impl(array, value, axis, highlevel, behavior):
arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
- behavior = ak._util.behavior_of(array, behavior=behavior)
+ behavior = ak._util.behavior_of(array, value, behavior=behavior)
backend = ak._backends.backend_of(arraylayout, default=cpu)
# Convert value type to appropriate layout
@@ -106,25 +106,24 @@
[value], allow_record=False, allow_other=False
)
- def maybe_fillna(layout):
- if layout.is_option:
- return ak._do.fill_none(layout, valuelayout)
- else:
- return layout
-
if axis is None:
def action(layout, continuation, **kwargs):
- return maybe_fillna(continuation())
+ if layout.is_option:
+ return ak._do.fill_none(continuation(), valuelayout)
else:
def action(layout, depth, **kwargs):
posaxis = ak._util.maybe_posaxis(layout, axis, depth)
- if posaxis is not None and posaxis + 1 < depth:
- return layout
- elif posaxis is not None and posaxis + 1 == depth:
- return maybe_fillna(layout)
+ if posaxis is not None and posaxis + 1 == depth:
+ if layout.is_union or layout.is_record:
+ return None
+ elif layout.is_option:
+ return ak._do.fill_none(layout, valuelayout)
+ else:
+ return layout
+
elif layout.is_leaf:
raise ak._errors.wrap_error(
np.AxisError(
@@ -133,5 +132,4 @@
)
out = ak._do.recursively_apply(arraylayout, action, behavior)
-
return ak._util.wrap(out, behavior, highlevel)
|
{"golden_diff": "diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py\n--- a/src/awkward/operations/ak_fill_none.py\n+++ b/src/awkward/operations/ak_fill_none.py\n@@ -63,7 +63,7 @@\n \n def _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n- behavior = ak._util.behavior_of(array, behavior=behavior)\n+ behavior = ak._util.behavior_of(array, value, behavior=behavior)\n backend = ak._backends.backend_of(arraylayout, default=cpu)\n \n # Convert value type to appropriate layout\n@@ -106,25 +106,24 @@\n [value], allow_record=False, allow_other=False\n )\n \n- def maybe_fillna(layout):\n- if layout.is_option:\n- return ak._do.fill_none(layout, valuelayout)\n- else:\n- return layout\n-\n if axis is None:\n \n def action(layout, continuation, **kwargs):\n- return maybe_fillna(continuation())\n+ if layout.is_option:\n+ return ak._do.fill_none(continuation(), valuelayout)\n \n else:\n \n def action(layout, depth, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n- if posaxis is not None and posaxis + 1 < depth:\n- return layout\n- elif posaxis is not None and posaxis + 1 == depth:\n- return maybe_fillna(layout)\n+ if posaxis is not None and posaxis + 1 == depth:\n+ if layout.is_union or layout.is_record:\n+ return None\n+ elif layout.is_option:\n+ return ak._do.fill_none(layout, valuelayout)\n+ else:\n+ return layout\n+\n elif layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(\n@@ -133,5 +132,4 @@\n )\n \n out = ak._do.recursively_apply(arraylayout, action, behavior)\n-\n return ak._util.wrap(out, behavior, highlevel)\n", "issue": "`ak.fill_none` doesn't support option-in-record\n### Version of Awkward Array\r\n\r\nmain\r\n\r\n### Description and code to reproduce\r\n\r\nThe following example is left unchanged by `ak.fill_none`:\r\n```python\r\nimport awkward as ak\r\n\r\nrecord = ak.zip({\r\n 'x': [1, None],\r\n 'y': [2, 3]\r\n})\r\n\r\nresult = ak.fill_none(record, 0, axis=-1)\r\nresult.show()\r\n```\r\n\r\nI'm fairly sure that this is a bug, because `ak.is_none` walks successfully through records. My understanding of records is that it's only reducers that cannot peer through them. \r\n\r\nOf course, this example would imply\r\n```python\r\nrecord = ak.zip({\r\n \"x\": [1, 4], \r\n\t\"y\": [2, 3]\r\n}).mask[[True, False]]\r\nassert ak.fill_none(record, 0, axis=-1).to_list() == [\r\n {\"x\": 1, \"y\": 2},\r\n 0\r\n]\r\n```\r\n\r\nI think this is reasonable; again, `is_none` behaves identically to this. Maybe we _do_ want a parameter in future, e.g. `visit_records` or some such.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak._nplikes.NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef fill_none(array, value, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. 
The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following\n\n >>> array = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], [], ['hi', ...]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.fill_none(array, \"hi\").type.show()\n 3 * var * union[\n float64,\n string\n ]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n behavior = ak._util.behavior_of(array, behavior=behavior)\n backend = ak._backends.backend_of(arraylayout, default=cpu)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value)[np.newaxis],\n allow_record=False,\n allow_other=False,\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n backend.index_nplike.array([0, 0], dtype=np.int64)\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_option:\n return ak._do.fill_none(layout, valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def action(layout, continuation, **kwargs):\n return maybe_fillna(continuation())\n\n else:\n\n def action(layout, depth, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if posaxis is not None and posaxis + 1 < depth:\n return layout\n elif posaxis is not None and posaxis + 1 == depth:\n return maybe_fillna(layout)\n elif layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(\n f\"axis={axis} 
exceeds the depth of this array ({depth})\"\n )\n )\n\n out = ak._do.recursively_apply(arraylayout, action, behavior)\n\n return ak._util.wrap(out, behavior, highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}]}
| 2,311 | 490 |
gh_patches_debug_13262
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5477
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plot is empty when log scale is used
Bokeh incorrectly creates an empty plot with the following:
``` python
from bokeh.plotting import figure, output_file, show
ys = [4.471799184102565e-05, 0.0009856299875536934, 0.0011045119899790734]
xs = range(len(ys))
output_file("log.html")
p = figure(y_axis_type="log")
p.line(xs, ys)
show(p)
```
The problem only occurs if `y_axis_type="log"` is used.
It seems that when the range of y values is too narrow, bokeh fails to compute the y-axis range. If I multiply the y values by 100, it plots properly.
Tested on bokeh version: 0.11.1 py34_0
</issue>
<code>
[start of bokeh/models/ranges.py]
1 """ Models for describing different kinds of ranges of values
2 in different kinds of spaces (e.g., continuous or categorical)
3 and with options for "auto sizing".
4
5 """
6 from __future__ import absolute_import
7
8 from ..model import Model
9 from ..core.enums import StartEnd
10 from ..core.properties import abstract
11 from ..core.properties import (
12 Auto, Bool, Int, Float, String, Datetime, TimeDelta, Instance, List,
13 Either, Enum, MinMaxBounds,
14 )
15 from .callbacks import Callback
16 from .renderers import Renderer
17
18
19 @abstract
20 class Range(Model):
21 """ A base class for all range types. ``Range`` is not generally
22 useful to instantiate on its own.
23
24 """
25
26 callback = Instance(Callback, help="""
27 A callback to run in the browser whenever the range is updated.
28 """)
29
30
31 class Range1d(Range):
32 """ A fixed, closed range [start, end] in a continuous scalar
33 dimension.
34
35 In addition to supplying ``start`` and ``end`` keyword arguments
36 to the ``Range1d`` initializer, you can also instantiate with
37 the convenience syntax::
38
39 Range(0, 10) # equivalent to Range(start=0, end=10)
40
41 """
42
43 start = Either(Float, Datetime, Int, default=0, help="""
44 The start of the range.
45 """)
46
47 end = Either(Float, Datetime, Int, default=1, help="""
48 The end of the range.
49 """)
50
51 bounds = MinMaxBounds(accept_datetime=True, default=None, help="""
52 The bounds that the range is allowed to go to - typically used to prevent
53 the user from panning/zooming/etc away from the data.
54
55 If set to ``'auto'``, the bounds will be computed to the start and end of the Range.
56
57 Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is
58 increasing or decreasing, the first item should be the minimum value of the range and the
59 second item should be the maximum. Setting min > max will result in a ``ValueError``.
60
61 By default, bounds are ``None`` and your plot to pan/zoom as far as you want. If you only
62 want to constrain one end of the plot, you can set min or max to None.
63
64 Examples:
65
66 Range1d(0, 1, bounds='auto') # Auto-bounded to 0 and 1 (Default behavior)
67 Range1d(start=0, end=1, bounds=(0, None)) # Maximum is unbounded, minimum bounded to 0
68 """)
69
70 min_interval = Either(Float, TimeDelta, Int, default=None, help="""
71 The level that the range is allowed to zoom in, expressed as the
72 minimum visible interval. If set to ``None`` (default), the minimum
73 interval is not bound. Can be a timedelta. """)
74
75 max_interval = Either(Float, TimeDelta, Int, default=None, help="""
76 The level that the range is allowed to zoom out, expressed as the
77 maximum visible interval. Can be a timedelta. Note that ``bounds`` can
78 impose an implicit constraint on the maximum interval as well. """)
79
80 def __init__(self, *args, **kwargs):
81 if args and ('start' in kwargs or 'end' in kwargs):
82 raise ValueError("'start' and 'end' keywords cannot be used with positional arguments")
83 if args and len(args) != 2:
84 raise ValueError('Only Range1d(start, end) acceptable when using positional arguments')
85
86 if args:
87 kwargs['start'] = args[0]
88 kwargs['end'] = args[1]
89
90 super(Range1d, self).__init__(**kwargs)
91
92
93 @abstract
94 class DataRange(Range):
95 """ A base class for all data range types. ``DataRange`` is not
96 generally useful to instantiate on its own.
97
98 """
99
100 names = List(String, help="""
101 A list of names to query for. If set, only renderers that
102 have a matching value for their ``name`` attribute will be used
103 for autoranging.
104 """)
105
106 renderers = List(Instance(Renderer), help="""
107 An explicit list of renderers to autorange against. If unset,
108 defaults to all renderers on a plot.
109 """)
110
111
112 class DataRange1d(DataRange):
113 """ An auto-fitting range in a continuous scalar dimension.
114 The upper and lower bounds are set to the min and max of the data.
115 """
116
117 range_padding = Float(default=0.1, help="""
118 A percentage of the total range size to add as padding to
119 the range start and end.
120 """)
121
122 start = Float(help="""
123 An explicitly supplied range start. If provided, will override
124 automatically computed start value.
125 """)
126
127 end = Float(help="""
128 An explicitly supplied range end. If provided, will override
129 automatically computed end value.
130 """)
131
132 bounds = MinMaxBounds(accept_datetime=False, default=None, help="""
133 The bounds that the range is allowed to go to - typically used to prevent
134 the user from panning/zooming/etc away from the data.
135
136 By default, the bounds will be None, allowing your plot to pan/zoom as far as you want.
137 If bounds are 'auto' they will be computed to be the same as the start and end of the DataRange1d.
138
139 Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is
140 increasing or decreasing, the first item should be the minimum value of the range and the
141 second item should be the maximum. Setting min > max will result in a ``ValueError``.
142
143 If you only want to constrain one end of the plot, you can set min or max to
144 ``None`` e.g. ``DataRange1d(bounds=(None, 12))``
145 """)
146
147 min_interval = Float(default=None, help="""
148 The level that the range is allowed to zoom in, expressed as the
149 minimum visible interval. If set to ``None`` (default), the minimum
150 interval is not bound.""")
151
152 max_interval = Float(default=None, help="""
153 The level that the range is allowed to zoom out, expressed as the
154 maximum visible interval. Note that ``bounds`` can impose an
155 implicit constraint on the maximum interval as well.""")
156
157 flipped = Bool(default=False, help="""
158 Whether the range should be "flipped" from its normal direction when
159 auto-ranging.
160 """)
161
162 follow = Enum(StartEnd, default=None, help="""
163 Configure the data to follow one or the other data extreme, with a
164 maximum range size of ``follow_interval``.
165
166 If set to ``"start"`` then the range will adjust so that ``start`` always
167 corresponds to the minimum data value (or maximum, if ``flipped`` is
168 ``True``).
169
170 If set to ``"end"`` then the range will adjust so that ``end`` always
171 corresponds to the maximum data value (or minimum, if ``flipped`` is
172 ``True``).
173
174 If set to ``None`` (default), then auto-ranging does not follow, and
175 the range will encompass both the minimum and maximum data values.
176
177 ``follow`` cannot be used with bounds, and if set, bounds will be set to ``None``.
178 """)
179
180 follow_interval = Float(default=None, help="""
181 If ``follow`` is set to ``"start"`` or ``"end"`` then the range will
182 always be constrained to that::
183
184 abs(r.start - r.end) <= follow_interval
185
186 is maintained.
187
188 """)
189
190 default_span = Float(default=2.0, help="""
191 A default width for the interval, in case ``start`` is equal to ``end``.
192 """)
193
194 def __init__(self, *args, **kwargs):
195 if kwargs.get('follow') is not None:
196 kwargs['bounds'] = None
197 super(DataRange1d, self).__init__(**kwargs)
198
199
200 class FactorRange(Range):
201 """ A range in a categorical dimension.
202
203 In addition to supplying ``factors`` keyword argument to the
204 ``FactorRange`` initializer, you can also instantiate with
205 the convenience syntax::
206
207 FactorRange("foo", "bar") # equivalent to FactorRange(factors=["foo", "bar"])
208
209 .. note::
210 ``FactorRange`` may be renamed to ``CategoricalRange`` in
211 the future.
212
213 """
214
215 offset = Float(0, help="""
216 An offset to the (synthetic) range (default: 0)
217
218 .. note::
219 The primary usage of this is to support compatibility and integration
220 with other plotting systems, and will not generally of interest to
221 most users.
222
223 """)
224
225 factors = Either(List(String), List(Int), help="""
226 A list of string or integer factors (categories) to comprise
227 this categorical range.
228 """)
229
230 bounds = Either(Auto, List(String), List(Int), default=None, help="""
231 The bounds that the range is allowed to go to - typically used to prevent
232 the user from panning/zooming/etc away from the data.
233
234 Unlike Range1d and DataRange1d, factors do not have an order and so a min and max cannot be
235 provied in the same way. bounds accepts a list of factors, that constrain the displayed factors.
236
237 By default, bounds are ``None``, allows unlimited panning or zooming.
238
239 If ``bounds='auto'``, bounds will be the same as factors and the plot will not be able to
240 pan or zoom beyond the first and last items in factors.
241
242 If you provide a list, then only the factors that are in that list will be displayed on the
243 plot and the plot will not pan or zoom outside the first and last items in the shortened
244 factors list. Note the order of factors is the defining order for your plot.
245
246 Values of bounds that are not in factors are acceptable and will simply have no impact
247 on the plot.
248
249 Examples:
250
251 Auto behavior:
252 x_range = FactorRange(factors=["apples", "dogs", "peaches", "bananas", "pigs"], bounds='auto')
253
254 The plot will display all the factors and you will not be able to pan left of apples or right
255 of pigs.
256
257 Constraining behavior:
258 x_range = FactorRange(factors=["apples", "dogs", "peaches", "bananas", "pigs"], bounds=["apples", "bananas", "peaches"])
259
260 The plot will display the chart with only the factors ["apples", "peaches", "bananas"] (in that order)
261 and the plot will not pan left of apples or right of bananas.
262 """)
263
264 min_interval = Int(default=None, help="""
265 The level that the range is allowed to zoom in, expressed as the
266 minimum number of visible categories. If set to ``None`` (default),
267 the minimum interval is not bound.""")
268
269 max_interval = Int(default=None, help="""
270 The level that the range is allowed to zoom out, expressed as the
271 maximum number of visible categories. Note that ``bounds`` can
272 impose an implicit constraint on the maximum interval as well.""")
273
274 def __init__(self, *args, **kwargs):
275 if args and "factors" in kwargs:
276 raise ValueError("'factors' keyword cannot be used with positional arguments")
277 elif args:
278 kwargs['factors'] = list(args)
279 super(FactorRange, self).__init__(**kwargs)
280
[end of bokeh/models/ranges.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bokeh/models/ranges.py b/bokeh/models/ranges.py
--- a/bokeh/models/ranges.py
+++ b/bokeh/models/ranges.py
@@ -115,7 +115,7 @@
"""
range_padding = Float(default=0.1, help="""
- A percentage of the total range size to add as padding to
+ A fraction of the total range size to add as padding to
the range start and end.
""")
@@ -188,7 +188,8 @@
""")
default_span = Float(default=2.0, help="""
- A default width for the interval, in case ``start`` is equal to ``end``.
+ A default width for the interval, in case ``start`` is equal to ``end``
+ (if used with a log axis, default_span is in powers of 10).
""")
def __init__(self, *args, **kwargs):
|
{"golden_diff": "diff --git a/bokeh/models/ranges.py b/bokeh/models/ranges.py\n--- a/bokeh/models/ranges.py\n+++ b/bokeh/models/ranges.py\n@@ -115,7 +115,7 @@\n \"\"\"\n \n range_padding = Float(default=0.1, help=\"\"\"\n- A percentage of the total range size to add as padding to\n+ A fraction of the total range size to add as padding to\n the range start and end.\n \"\"\")\n \n@@ -188,7 +188,8 @@\n \"\"\")\n \n default_span = Float(default=2.0, help=\"\"\"\n- A default width for the interval, in case ``start`` is equal to ``end``.\n+ A default width for the interval, in case ``start`` is equal to ``end``\n+ (if used with a log axis, default_span is in powers of 10).\n \"\"\")\n \n def __init__(self, *args, **kwargs):\n", "issue": "plot is empty when log scale is used\nBokeh incorrectly creates an empty plot with the following:\n\n``` python\nfrom bokeh.plotting import figure, output_file, show\nys = [4.471799184102565e-05, 0.0009856299875536934, 0.0011045119899790734]\nxs = range(len(ys))\noutput_file(\"log.html\")\np = figure(y_axis_type=\"log\")\np.line(xs, ys)\nshow(p)\n```\n\nThe problem only occurs if `y_axis_type=\"log\"` is used.\n\nIt seems when the range of y values is too narrow bokeh fails to compute the y-axis range. If I multiple the y values by 100, it plots properly.\n\nTested on bokeh version: 0.11.1 py34_0\n\n", "before_files": [{"content": "\"\"\" Models for describing different kinds of ranges of values\nin different kinds of spaces (e.g., continuous or categorical)\nand with options for \"auto sizing\".\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..model import Model\nfrom ..core.enums import StartEnd\nfrom ..core.properties import abstract\nfrom ..core.properties import (\n Auto, Bool, Int, Float, String, Datetime, TimeDelta, Instance, List,\n Either, Enum, MinMaxBounds,\n)\nfrom .callbacks import Callback\nfrom .renderers import Renderer\n\n\n@abstract\nclass Range(Model):\n \"\"\" A base class for all range types. ``Range`` is not generally\n useful to instantiate on its own.\n\n \"\"\"\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the range is updated.\n \"\"\")\n\n\nclass Range1d(Range):\n \"\"\" A fixed, closed range [start, end] in a continuous scalar\n dimension.\n\n In addition to supplying ``start`` and ``end`` keyword arguments\n to the ``Range1d`` initializer, you can also instantiate with\n the convenience syntax::\n\n Range(0, 10) # equivalent to Range(start=0, end=10)\n\n \"\"\"\n\n start = Either(Float, Datetime, Int, default=0, help=\"\"\"\n The start of the range.\n \"\"\")\n\n end = Either(Float, Datetime, Int, default=1, help=\"\"\"\n The end of the range.\n \"\"\")\n\n bounds = MinMaxBounds(accept_datetime=True, default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n If set to ``'auto'``, the bounds will be computed to the start and end of the Range.\n\n Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is\n increasing or decreasing, the first item should be the minimum value of the range and the\n second item should be the maximum. Setting min > max will result in a ``ValueError``.\n\n By default, bounds are ``None`` and your plot to pan/zoom as far as you want. 
If you only\n want to constrain one end of the plot, you can set min or max to None.\n\n Examples:\n\n Range1d(0, 1, bounds='auto') # Auto-bounded to 0 and 1 (Default behavior)\n Range1d(start=0, end=1, bounds=(0, None)) # Maximum is unbounded, minimum bounded to 0\n \"\"\")\n\n min_interval = Either(Float, TimeDelta, Int, default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum visible interval. If set to ``None`` (default), the minimum\n interval is not bound. Can be a timedelta. \"\"\")\n\n max_interval = Either(Float, TimeDelta, Int, default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum visible interval. Can be a timedelta. Note that ``bounds`` can\n impose an implicit constraint on the maximum interval as well. \"\"\")\n\n def __init__(self, *args, **kwargs):\n if args and ('start' in kwargs or 'end' in kwargs):\n raise ValueError(\"'start' and 'end' keywords cannot be used with positional arguments\")\n if args and len(args) != 2:\n raise ValueError('Only Range1d(start, end) acceptable when using positional arguments')\n\n if args:\n kwargs['start'] = args[0]\n kwargs['end'] = args[1]\n\n super(Range1d, self).__init__(**kwargs)\n\n\n@abstract\nclass DataRange(Range):\n \"\"\" A base class for all data range types. ``DataRange`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n names = List(String, help=\"\"\"\n A list of names to query for. If set, only renderers that\n have a matching value for their ``name`` attribute will be used\n for autoranging.\n \"\"\")\n\n renderers = List(Instance(Renderer), help=\"\"\"\n An explicit list of renderers to autorange against. If unset,\n defaults to all renderers on a plot.\n \"\"\")\n\n\nclass DataRange1d(DataRange):\n \"\"\" An auto-fitting range in a continuous scalar dimension.\n The upper and lower bounds are set to the min and max of the data.\n \"\"\"\n\n range_padding = Float(default=0.1, help=\"\"\"\n A percentage of the total range size to add as padding to\n the range start and end.\n \"\"\")\n\n start = Float(help=\"\"\"\n An explicitly supplied range start. If provided, will override\n automatically computed start value.\n \"\"\")\n\n end = Float(help=\"\"\"\n An explicitly supplied range end. If provided, will override\n automatically computed end value.\n \"\"\")\n\n bounds = MinMaxBounds(accept_datetime=False, default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n By default, the bounds will be None, allowing your plot to pan/zoom as far as you want.\n If bounds are 'auto' they will be computed to be the same as the start and end of the DataRange1d.\n\n Bounds are provided as a tuple of ``(min, max)`` so regardless of whether your range is\n increasing or decreasing, the first item should be the minimum value of the range and the\n second item should be the maximum. Setting min > max will result in a ``ValueError``.\n\n If you only want to constrain one end of the plot, you can set min or max to\n ``None`` e.g. ``DataRange1d(bounds=(None, 12))``\n \"\"\")\n\n min_interval = Float(default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum visible interval. 
If set to ``None`` (default), the minimum\n interval is not bound.\"\"\")\n\n max_interval = Float(default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum visible interval. Note that ``bounds`` can impose an\n implicit constraint on the maximum interval as well.\"\"\")\n\n flipped = Bool(default=False, help=\"\"\"\n Whether the range should be \"flipped\" from its normal direction when\n auto-ranging.\n \"\"\")\n\n follow = Enum(StartEnd, default=None, help=\"\"\"\n Configure the data to follow one or the other data extreme, with a\n maximum range size of ``follow_interval``.\n\n If set to ``\"start\"`` then the range will adjust so that ``start`` always\n corresponds to the minimum data value (or maximum, if ``flipped`` is\n ``True``).\n\n If set to ``\"end\"`` then the range will adjust so that ``end`` always\n corresponds to the maximum data value (or minimum, if ``flipped`` is\n ``True``).\n\n If set to ``None`` (default), then auto-ranging does not follow, and\n the range will encompass both the minimum and maximum data values.\n\n ``follow`` cannot be used with bounds, and if set, bounds will be set to ``None``.\n \"\"\")\n\n follow_interval = Float(default=None, help=\"\"\"\n If ``follow`` is set to ``\"start\"`` or ``\"end\"`` then the range will\n always be constrained to that::\n\n abs(r.start - r.end) <= follow_interval\n\n is maintained.\n\n \"\"\")\n\n default_span = Float(default=2.0, help=\"\"\"\n A default width for the interval, in case ``start`` is equal to ``end``.\n \"\"\")\n\n def __init__(self, *args, **kwargs):\n if kwargs.get('follow') is not None:\n kwargs['bounds'] = None\n super(DataRange1d, self).__init__(**kwargs)\n\n\nclass FactorRange(Range):\n \"\"\" A range in a categorical dimension.\n\n In addition to supplying ``factors`` keyword argument to the\n ``FactorRange`` initializer, you can also instantiate with\n the convenience syntax::\n\n FactorRange(\"foo\", \"bar\") # equivalent to FactorRange(factors=[\"foo\", \"bar\"])\n\n .. note::\n ``FactorRange`` may be renamed to ``CategoricalRange`` in\n the future.\n\n \"\"\"\n\n offset = Float(0, help=\"\"\"\n An offset to the (synthetic) range (default: 0)\n\n .. note::\n The primary usage of this is to support compatibility and integration\n with other plotting systems, and will not generally of interest to\n most users.\n\n \"\"\")\n\n factors = Either(List(String), List(Int), help=\"\"\"\n A list of string or integer factors (categories) to comprise\n this categorical range.\n \"\"\")\n\n bounds = Either(Auto, List(String), List(Int), default=None, help=\"\"\"\n The bounds that the range is allowed to go to - typically used to prevent\n the user from panning/zooming/etc away from the data.\n\n Unlike Range1d and DataRange1d, factors do not have an order and so a min and max cannot be\n provied in the same way. bounds accepts a list of factors, that constrain the displayed factors.\n\n By default, bounds are ``None``, allows unlimited panning or zooming.\n\n If ``bounds='auto'``, bounds will be the same as factors and the plot will not be able to\n pan or zoom beyond the first and last items in factors.\n\n If you provide a list, then only the factors that are in that list will be displayed on the\n plot and the plot will not pan or zoom outside the first and last items in the shortened\n factors list. 
Note the order of factors is the defining order for your plot.\n\n Values of bounds that are not in factors are acceptable and will simply have no impact\n on the plot.\n\n Examples:\n\n Auto behavior:\n x_range = FactorRange(factors=[\"apples\", \"dogs\", \"peaches\", \"bananas\", \"pigs\"], bounds='auto')\n\n The plot will display all the factors and you will not be able to pan left of apples or right\n of pigs.\n\n Constraining behavior:\n x_range = FactorRange(factors=[\"apples\", \"dogs\", \"peaches\", \"bananas\", \"pigs\"], bounds=[\"apples\", \"bananas\", \"peaches\"])\n\n The plot will display the chart with only the factors [\"apples\", \"peaches\", \"bananas\"] (in that order)\n and the plot will not pan left of apples or right of bananas.\n \"\"\")\n\n min_interval = Int(default=None, help=\"\"\"\n The level that the range is allowed to zoom in, expressed as the\n minimum number of visible categories. If set to ``None`` (default),\n the minimum interval is not bound.\"\"\")\n\n max_interval = Int(default=None, help=\"\"\"\n The level that the range is allowed to zoom out, expressed as the\n maximum number of visible categories. Note that ``bounds`` can\n impose an implicit constraint on the maximum interval as well.\"\"\")\n\n def __init__(self, *args, **kwargs):\n if args and \"factors\" in kwargs:\n raise ValueError(\"'factors' keyword cannot be used with positional arguments\")\n elif args:\n kwargs['factors'] = list(args)\n super(FactorRange, self).__init__(**kwargs)\n", "path": "bokeh/models/ranges.py"}]}
| 4,054 | 222 |
gh_patches_debug_4095
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-python-6083
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip install mne should install numpy and scipy
Currently, the `mne` PyPI package doesn't have any dependencies, but I think it should really treat `numpy` and `scipy` as hard dependencies (and automatically install them if they are not present).
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Copyright (C) 2011-2017 Alexandre Gramfort
4 # <[email protected]>
5
6 import os
7 import os.path as op
8
9 from setuptools import setup
10
11 # get the version (don't import mne here, so dependencies are not needed)
12 version = None
13 with open(op.join('mne', '__init__.py'), 'r') as fid:
14 for line in (line.strip() for line in fid):
15 if line.startswith('__version__'):
16 version = line.split('=')[1].strip().strip('\'')
17 break
18 if version is None:
19 raise RuntimeError('Could not determine version')
20
21
22 descr = """MNE python project for MEG and EEG data analysis."""
23
24 DISTNAME = 'mne'
25 DESCRIPTION = descr
26 MAINTAINER = 'Alexandre Gramfort'
27 MAINTAINER_EMAIL = '[email protected]'
28 URL = 'http://martinos.org/mne'
29 LICENSE = 'BSD (3-clause)'
30 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'
31 VERSION = version
32
33
34 def package_tree(pkgroot):
35 """Get the submodule list."""
36 # Adapted from VisPy
37 path = op.dirname(__file__)
38 subdirs = [op.relpath(i[0], path).replace(op.sep, '.')
39 for i in os.walk(op.join(path, pkgroot))
40 if '__init__.py' in i[2]]
41 return sorted(subdirs)
42
43
44 if __name__ == "__main__":
45 if op.exists('MANIFEST'):
46 os.remove('MANIFEST')
47
48 setup(name=DISTNAME,
49 maintainer=MAINTAINER,
50 include_package_data=True,
51 maintainer_email=MAINTAINER_EMAIL,
52 description=DESCRIPTION,
53 license=LICENSE,
54 url=URL,
55 version=VERSION,
56 download_url=DOWNLOAD_URL,
57 long_description=open('README.rst').read(),
58 zip_safe=False, # the package can run out of an .egg file
59 classifiers=['Intended Audience :: Science/Research',
60 'Intended Audience :: Developers',
61 'License :: OSI Approved',
62 'Programming Language :: Python',
63 'Topic :: Software Development',
64 'Topic :: Scientific/Engineering',
65 'Operating System :: Microsoft :: Windows',
66 'Operating System :: POSIX',
67 'Operating System :: Unix',
68 'Operating System :: MacOS',
69 'Programming Language :: Python :: 3',
70 ],
71 platforms='any',
72 packages=package_tree('mne'),
73 package_data={'mne': [
74 op.join('data', '*.sel'),
75 op.join('data', 'icos.fif.gz'),
76 op.join('data', 'coil_def*.dat'),
77 op.join('data', 'helmets', '*.fif.gz'),
78 op.join('data', 'FreeSurferColorLUT.txt'),
79 op.join('data', 'image', '*gif'),
80 op.join('data', 'image', '*lout'),
81 op.join('data', 'fsaverage', '*.fif'),
82 op.join('channels', 'data', 'layouts', '*.lout'),
83 op.join('channels', 'data', 'layouts', '*.lay'),
84 op.join('channels', 'data', 'montages', '*.sfp'),
85 op.join('channels', 'data', 'montages', '*.txt'),
86 op.join('channels', 'data', 'montages', '*.elc'),
87 op.join('channels', 'data', 'neighbors', '*.mat'),
88 op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),
89 op.join('gui', 'help', '*.json'),
90 op.join('html', '*.js'),
91 op.join('html', '*.css'),
92 op.join('io', 'artemis123', 'resources', '*.csv'),
93 op.join('io', 'edf', 'gdf_encodes.txt')
94 ]},
95 scripts=['bin/mne'])
96
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,6 +69,7 @@
'Programming Language :: Python :: 3',
],
platforms='any',
+ install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],
packages=package_tree('mne'),
package_data={'mne': [
op.join('data', '*.sel'),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,6 +69,7 @@\n 'Programming Language :: Python :: 3',\n ],\n platforms='any',\n+ install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],\n packages=package_tree('mne'),\n package_data={'mne': [\n op.join('data', '*.sel'),\n", "issue": "pip install mne should install numpy and scipy\nCurrently, the `mne` PyPI package doesn't have any dependencies, but I think it should really treat `numpy` and `scipy` as hard dependencies (and automatically install them if they are not present).\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (C) 2011-2017 Alexandre Gramfort\n# <[email protected]>\n\nimport os\nimport os.path as op\n\nfrom setuptools import setup\n\n# get the version (don't import mne here, so dependencies are not needed)\nversion = None\nwith open(op.join('mne', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = \"\"\"MNE python project for MEG and EEG data analysis.\"\"\"\n\nDISTNAME = 'mne'\nDESCRIPTION = descr\nMAINTAINER = 'Alexandre Gramfort'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://martinos.org/mne'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'\nVERSION = version\n\n\ndef package_tree(pkgroot):\n \"\"\"Get the submodule list.\"\"\"\n # Adapted from VisPy\n path = op.dirname(__file__)\n subdirs = [op.relpath(i[0], path).replace(op.sep, '.')\n for i in os.walk(op.join(path, pkgroot))\n if '__init__.py' in i[2]]\n return sorted(subdirs)\n\n\nif __name__ == \"__main__\":\n if op.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n zip_safe=False, # the package can run out of an .egg file\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3',\n ],\n platforms='any',\n packages=package_tree('mne'),\n package_data={'mne': [\n op.join('data', '*.sel'),\n op.join('data', 'icos.fif.gz'),\n op.join('data', 'coil_def*.dat'),\n op.join('data', 'helmets', '*.fif.gz'),\n op.join('data', 'FreeSurferColorLUT.txt'),\n op.join('data', 'image', '*gif'),\n op.join('data', 'image', '*lout'),\n op.join('data', 'fsaverage', '*.fif'),\n op.join('channels', 'data', 'layouts', '*.lout'),\n op.join('channels', 'data', 'layouts', '*.lay'),\n op.join('channels', 'data', 'montages', '*.sfp'),\n op.join('channels', 'data', 'montages', '*.txt'),\n op.join('channels', 'data', 'montages', '*.elc'),\n op.join('channels', 'data', 'neighbors', '*.mat'),\n op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),\n op.join('gui', 'help', '*.json'),\n op.join('html', '*.js'),\n op.join('html', '*.css'),\n op.join('io', 'artemis123', 'resources', '*.csv'),\n op.join('io', 'edf', 'gdf_encodes.txt')\n ]},\n scripts=['bin/mne'])\n", "path": "setup.py"}]}
| 1,617 | 100 |
gh_patches_debug_7655
|
rasdani/github-patches
|
git_diff
|
openfun__marsha-99
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Staging, PreProduction & Production settings
## Purpose
At the time of writing, the available configurations for Marsha are `Development` and `Test`. We need additional settings for the missing environments.
## Proposal
- [ ] Add `Production` configuration
- [ ] Derive `Staging` and `Preproduction` configurations from `Production`
</issue>
<code>
[start of marsha/settings.py]
1 """Django settings for marsha project.
2
3 Uses django-configurations to manage environments inheritance and the loading of some
4 config from the environment
5
6 """
7
8 from datetime import timedelta
9 import os
10
11 from django.utils.translation import gettext_lazy as _
12
13 from configurations import Configuration, values
14 import sentry_sdk
15 from sentry_sdk.integrations.django import DjangoIntegration
16
17
18 class Base(Configuration):
19 """Base configuration every configuration (aka environment) should inherit from.
20
21 It depends on an environment variable that SHOULD be defined:
22 - DJANGO_SECRET_KEY
23
24 You may also want to override default configuration by setting the following
25 environment variables:
26 - DJANGO_DEBUG
27 - DATABASE_URL
28 """
29
30 BASE_DIR = os.path.dirname(__file__)
31 DATA_DIR = values.Value(os.path.join("/", "data"))
32
33 # Static files (CSS, JavaScript, Images)
34 STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
35 STATIC_URL = "/static/"
36 MEDIA_URL = "/media/"
37 # Allow to configure location of static/media files for non-Docker installation
38 MEDIA_ROOT = values.Value(os.path.join(str(DATA_DIR), "media"))
39 STATIC_ROOT = values.Value(os.path.join(str(DATA_DIR), "static"))
40
41 SECRET_KEY = values.SecretValue()
42
43 DEBUG = values.BooleanValue(False)
44
45 DATABASES = {
46 "default": {
47 "ENGINE": values.Value(
48 "django.db.backends.postgresql_psycopg2",
49 environ_name="DATABASE_ENGINE",
50 environ_prefix=None,
51 ),
52 "NAME": values.Value(
53 "marsha", environ_name="POSTGRES_DB", environ_prefix=None
54 ),
55 "USER": values.Value(
56 "marsha_user", environ_name="POSTGRES_USER", environ_prefix=None
57 ),
58 "PASSWORD": values.Value(
59 "pass", environ_name="POSTGRES_PASSWORD", environ_prefix=None
60 ),
61 "HOST": values.Value(
62 "localhost", environ_name="POSTGRES_HOST", environ_prefix=None
63 ),
64 "PORT": values.Value(
65 5432, environ_name="POSTGRES_PORT", environ_prefix=None
66 ),
67 }
68 }
69
70 ALLOWED_HOSTS = []
71
72 # Application definition
73
74 INSTALLED_APPS = [
75 "django.contrib.admin.apps.SimpleAdminConfig",
76 "django.contrib.auth",
77 "django.contrib.contenttypes",
78 "django.contrib.sessions",
79 "django.contrib.messages",
80 "django.contrib.staticfiles",
81 "django_extensions",
82 "rest_framework",
83 "marsha.core.apps.CoreConfig",
84 ]
85
86 MIDDLEWARE = [
87 "django.middleware.security.SecurityMiddleware",
88 "django.contrib.sessions.middleware.SessionMiddleware",
89 "django.middleware.common.CommonMiddleware",
90 "django.middleware.csrf.CsrfViewMiddleware",
91 "django.contrib.auth.middleware.AuthenticationMiddleware",
92 "django.contrib.messages.middleware.MessageMiddleware",
93 "django.middleware.clickjacking.XFrameOptionsMiddleware",
94 ]
95
96 ROOT_URLCONF = "marsha.urls"
97
98 TEMPLATES = [
99 {
100 "BACKEND": "django.template.backends.django.DjangoTemplates",
101 "DIRS": [],
102 "APP_DIRS": True,
103 "OPTIONS": {
104 "context_processors": [
105 "django.template.context_processors.debug",
106 "django.template.context_processors.request",
107 "django.contrib.auth.context_processors.auth",
108 "django.contrib.messages.context_processors.messages",
109 ]
110 },
111 }
112 ]
113
114 AUTH_USER_MODEL = "core.User"
115
116 WSGI_APPLICATION = "marsha.wsgi.application"
117
118 REST_FRAMEWORK = {
119 "DEFAULT_AUTHENTICATION_CLASSES": (
120 "rest_framework_simplejwt.authentication.JWTTokenUserAuthentication",
121 )
122 }
123
124 # Password validation
125 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
126 AUTH_PASSWORD_VALIDATORS = [
127 {
128 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
129 },
130 {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
131 {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
132 {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
133 ]
134
135 JWT_SIGNING_KEY = values.SecretValue()
136
137 # Internationalization
138 # https://docs.djangoproject.com/en/2.0/topics/i18n/
139
140 LANGUAGE_CODE = "en-us"
141
142 LANGUAGES = [("en", _("english")), ("fr", _("french"))]
143
144 # Internationalization
145 TIME_ZONE = "UTC"
146 USE_I18N = True
147 USE_L10N = True
148 USE_TZ = True
149
150 VIDEO_RESOLUTIONS = [144, 240, 480, 720, 1080]
151
152 # AWS
153 AWS_ACCESS_KEY_ID = values.SecretValue()
154 AWS_SECRET_ACCESS_KEY = values.SecretValue()
155 AWS_DEFAULT_REGION = values.Value("eu-west-1")
156 UPDATE_STATE_SHARED_SECRETS = values.ListValue()
157
158 # Cloud Front key pair for signed urls
159 CLOUDFRONT_URL = values.SecretValue()
160 CLOUDFRONT_ACCESS_KEY_ID = values.Value(None)
161 CLOUDFRONT_PRIVATE_KEY_PATH = values.Value(
162 os.path.join(BASE_DIR, "..", ".ssh", "cloudfront_private_key")
163 )
164 CLOUDFRONT_SIGNED_URLS_ACTIVE = True
165 CLOUDFRONT_SIGNED_URLS_VALIDITY = 2 * 60 * 60 # 2 hours
166
167 # pylint: disable=invalid-name
168 @property
169 def SIMPLE_JWT(self):
170 """Define settings for `djangorestframework_simplejwt`.
171
172 The JWT_SIGNING_KEY must be evaluated late as the jwt library check for string type.
173 """
174 return {
175 "ACCESS_TOKEN_LIFETIME": timedelta(days=1),
176 "ALGORITHM": "HS256",
177 "SIGNING_KEY": str(self.JWT_SIGNING_KEY),
178 "USER_ID_CLAIM": "video_id",
179 "AUTH_TOKEN_CLASSES": ("rest_framework_simplejwt.tokens.AccessToken",),
180 }
181
182 @classmethod
183 def post_setup(cls):
184 """Post setup configuration.
185
186 This is the place where you can configure settings that require other
187 settings to be loaded.
188 """
189 super().post_setup()
190
191 # The DJANGO_SENTRY_DSN environment variable should be set to activate
192 # sentry for an environment
193 sentry_dsn = values.Value(None, environ_name="SENTRY_DSN")
194 if sentry_dsn is not None:
195 sentry_sdk.init(
196 dsn=sentry_dsn,
197 environment=cls.__name__.lower(),
198 integrations=[DjangoIntegration()],
199 )
200
201
202 class Development(Base):
203 """Development environment settings.
204
205 We set ``DEBUG`` to ``True`` by default, configure the server to respond to all hosts,
206 and use a local sqlite database by default.
207 """
208
209 DEBUG = values.BooleanValue(True)
210 ALLOWED_HOSTS = ["*"]
211
212 AWS_SOURCE_BUCKET_NAME = "development-marsha-source"
213
214
215 class Test(Base):
216 """Test environment settings."""
217
218 AWS_SOURCE_BUCKET_NAME = "test-marsha-source"
219
220 CLOUDFRONT_SIGNED_URLS_ACTIVE = False
221
[end of marsha/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/marsha/settings.py b/marsha/settings.py
--- a/marsha/settings.py
+++ b/marsha/settings.py
@@ -218,3 +218,27 @@
AWS_SOURCE_BUCKET_NAME = "test-marsha-source"
CLOUDFRONT_SIGNED_URLS_ACTIVE = False
+
+
+class Production(Base):
+ """Production environment settings.
+
+ You must define the DJANGO_ALLOWED_HOSTS environment variable in Production
+ configuration (and derived configurations):
+
+ DJANGO_ALLOWED_HOSTS="foo.com,foo.fr"
+ """
+
+ ALLOWED_HOSTS = values.ListValue(None)
+
+
+class Staging(Production):
+ """Staging environment settings."""
+
+ pass
+
+
+class PreProduction(Production):
+ """Pre-production environment settings."""
+
+ pass
|
{"golden_diff": "diff --git a/marsha/settings.py b/marsha/settings.py\n--- a/marsha/settings.py\n+++ b/marsha/settings.py\n@@ -218,3 +218,27 @@\n AWS_SOURCE_BUCKET_NAME = \"test-marsha-source\"\n \n CLOUDFRONT_SIGNED_URLS_ACTIVE = False\n+\n+\n+class Production(Base):\n+ \"\"\"Production environment settings.\n+\n+ You must define the DJANGO_ALLOWED_HOSTS environment variable in Production\n+ configuration (and derived configurations):\n+\n+ DJANGO_ALLOWED_HOSTS=\"foo.com,foo.fr\"\n+ \"\"\"\n+\n+ ALLOWED_HOSTS = values.ListValue(None)\n+\n+\n+class Staging(Production):\n+ \"\"\"Staging environment settings.\"\"\"\n+\n+ pass\n+\n+\n+class PreProduction(Production):\n+ \"\"\"Pre-production environment settings.\"\"\"\n+\n+ pass\n", "issue": "Add Staging, PreProduction & Production settings\n## Purpose\r\n\r\nAt the time of writing, available configurations for Marsha are: `Development` and `Test`. We need more settings for missing environments. \r\n\r\n## Proposal\r\n\r\n- [ ] Add `Production` configuration\r\n- [ ] Derive `Staging` and `Preproduction` configurations from `Production`\n", "before_files": [{"content": "\"\"\"Django settings for marsha project.\n\nUses django-configurations to manage environments inheritance and the loading of some\nconfig from the environment\n\n\"\"\"\n\nfrom datetime import timedelta\nimport os\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom configurations import Configuration, values\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\n\nclass Base(Configuration):\n \"\"\"Base configuration every configuration (aka environment) should inherit from.\n\n It depends on an environment variable that SHOULD be defined:\n - DJANGO_SECRET_KEY\n\n You may also want to override default configuration by setting the following\n environment variables:\n - DJANGO_DEBUG\n - DATABASE_URL\n \"\"\"\n\n BASE_DIR = os.path.dirname(__file__)\n DATA_DIR = values.Value(os.path.join(\"/\", \"data\"))\n\n # Static files (CSS, JavaScript, Images)\n STATICFILES_DIRS = (os.path.join(BASE_DIR, \"static\"),)\n STATIC_URL = \"/static/\"\n MEDIA_URL = \"/media/\"\n # Allow to configure location of static/media files for non-Docker installation\n MEDIA_ROOT = values.Value(os.path.join(str(DATA_DIR), \"media\"))\n STATIC_ROOT = values.Value(os.path.join(str(DATA_DIR), \"static\"))\n\n SECRET_KEY = values.SecretValue()\n\n DEBUG = values.BooleanValue(False)\n\n DATABASES = {\n \"default\": {\n \"ENGINE\": values.Value(\n \"django.db.backends.postgresql_psycopg2\",\n environ_name=\"DATABASE_ENGINE\",\n environ_prefix=None,\n ),\n \"NAME\": values.Value(\n \"marsha\", environ_name=\"POSTGRES_DB\", environ_prefix=None\n ),\n \"USER\": values.Value(\n \"marsha_user\", environ_name=\"POSTGRES_USER\", environ_prefix=None\n ),\n \"PASSWORD\": values.Value(\n \"pass\", environ_name=\"POSTGRES_PASSWORD\", environ_prefix=None\n ),\n \"HOST\": values.Value(\n \"localhost\", environ_name=\"POSTGRES_HOST\", environ_prefix=None\n ),\n \"PORT\": values.Value(\n 5432, environ_name=\"POSTGRES_PORT\", environ_prefix=None\n ),\n }\n }\n\n ALLOWED_HOSTS = []\n\n # Application definition\n\n INSTALLED_APPS = [\n \"django.contrib.admin.apps.SimpleAdminConfig\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"rest_framework\",\n \"marsha.core.apps.CoreConfig\",\n ]\n\n MIDDLEWARE = [\n 
\"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n ]\n\n ROOT_URLCONF = \"marsha.urls\"\n\n TEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n },\n }\n ]\n\n AUTH_USER_MODEL = \"core.User\"\n\n WSGI_APPLICATION = \"marsha.wsgi.application\"\n\n REST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework_simplejwt.authentication.JWTTokenUserAuthentication\",\n )\n }\n\n # Password validation\n # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n AUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\"\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n ]\n\n JWT_SIGNING_KEY = values.SecretValue()\n\n # Internationalization\n # https://docs.djangoproject.com/en/2.0/topics/i18n/\n\n LANGUAGE_CODE = \"en-us\"\n\n LANGUAGES = [(\"en\", _(\"english\")), (\"fr\", _(\"french\"))]\n\n # Internationalization\n TIME_ZONE = \"UTC\"\n USE_I18N = True\n USE_L10N = True\n USE_TZ = True\n\n VIDEO_RESOLUTIONS = [144, 240, 480, 720, 1080]\n\n # AWS\n AWS_ACCESS_KEY_ID = values.SecretValue()\n AWS_SECRET_ACCESS_KEY = values.SecretValue()\n AWS_DEFAULT_REGION = values.Value(\"eu-west-1\")\n UPDATE_STATE_SHARED_SECRETS = values.ListValue()\n\n # Cloud Front key pair for signed urls\n CLOUDFRONT_URL = values.SecretValue()\n CLOUDFRONT_ACCESS_KEY_ID = values.Value(None)\n CLOUDFRONT_PRIVATE_KEY_PATH = values.Value(\n os.path.join(BASE_DIR, \"..\", \".ssh\", \"cloudfront_private_key\")\n )\n CLOUDFRONT_SIGNED_URLS_ACTIVE = True\n CLOUDFRONT_SIGNED_URLS_VALIDITY = 2 * 60 * 60 # 2 hours\n\n # pylint: disable=invalid-name\n @property\n def SIMPLE_JWT(self):\n \"\"\"Define settings for `djangorestframework_simplejwt`.\n\n The JWT_SIGNING_KEY must be evaluated late as the jwt library check for string type.\n \"\"\"\n return {\n \"ACCESS_TOKEN_LIFETIME\": timedelta(days=1),\n \"ALGORITHM\": \"HS256\",\n \"SIGNING_KEY\": str(self.JWT_SIGNING_KEY),\n \"USER_ID_CLAIM\": \"video_id\",\n \"AUTH_TOKEN_CLASSES\": (\"rest_framework_simplejwt.tokens.AccessToken\",),\n }\n\n @classmethod\n def post_setup(cls):\n \"\"\"Post setup configuration.\n\n This is the place where you can configure settings that require other\n settings to be loaded.\n \"\"\"\n super().post_setup()\n\n # The DJANGO_SENTRY_DSN environment variable should be set to activate\n # sentry for an environment\n sentry_dsn = values.Value(None, environ_name=\"SENTRY_DSN\")\n if sentry_dsn is not None:\n sentry_sdk.init(\n dsn=sentry_dsn,\n environment=cls.__name__.lower(),\n integrations=[DjangoIntegration()],\n )\n\n\nclass Development(Base):\n \"\"\"Development environment settings.\n\n We 
set ``DEBUG`` to ``True`` by default, configure the server to respond to all hosts,\n and use a local sqlite database by default.\n \"\"\"\n\n DEBUG = values.BooleanValue(True)\n ALLOWED_HOSTS = [\"*\"]\n\n AWS_SOURCE_BUCKET_NAME = \"development-marsha-source\"\n\n\nclass Test(Base):\n \"\"\"Test environment settings.\"\"\"\n\n AWS_SOURCE_BUCKET_NAME = \"test-marsha-source\"\n\n CLOUDFRONT_SIGNED_URLS_ACTIVE = False\n", "path": "marsha/settings.py"}]}
| 2,695 | 187 |
gh_patches_debug_4828
|
rasdani/github-patches
|
git_diff
|
hylang__hy-2078
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Run REPL using `python -m hy`
Figuring out how to get a REPL is not trivial. What do you think about making `python -m hy` do it?
</issue>
<code>
[start of hy/__main__.py]
1 import hy # NOQA
2 import sys
3
4 # This just mocks the normalish behavior of the Python interp. Helpful to aid
5 # with shimming existing apps that don't really "work" with Hy.
6 #
7 # You could say this script helps Hyjack a file.
8 #
9
10
11 if len(sys.argv) > 1:
12 sys.argv.pop(0)
13 hy.importer._import_from_path('__main__', sys.argv[0])
14 sys.exit(0) # right?
15
[end of hy/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hy/__main__.py b/hy/__main__.py
--- a/hy/__main__.py
+++ b/hy/__main__.py
@@ -1,14 +1,8 @@
-import hy # NOQA
import sys
-# This just mocks the normalish behavior of the Python interp. Helpful to aid
-# with shimming existing apps that don't really "work" with Hy.
-#
-# You could say this script helps Hyjack a file.
-#
+from hy.cmdline import hy_main
+# Running hy as a module (e.g. `python -m hy`)
+# is equivalent to running the main `hy` command.
-if len(sys.argv) > 1:
- sys.argv.pop(0)
- hy.importer._import_from_path('__main__', sys.argv[0])
- sys.exit(0) # right?
+sys.exit(hy_main())
|
{"golden_diff": "diff --git a/hy/__main__.py b/hy/__main__.py\n--- a/hy/__main__.py\n+++ b/hy/__main__.py\n@@ -1,14 +1,8 @@\n-import hy # NOQA\n import sys\n \n-# This just mocks the normalish behavior of the Python interp. Helpful to aid\n-# with shimming existing apps that don't really \"work\" with Hy.\n-#\n-# You could say this script helps Hyjack a file.\n-#\n+from hy.cmdline import hy_main\n \n+# Running hy as a module (e.g. `python -m hy`)\n+# is equivalent to running the main `hy` command.\n \n-if len(sys.argv) > 1:\n- sys.argv.pop(0)\n- hy.importer._import_from_path('__main__', sys.argv[0])\n- sys.exit(0) # right?\n+sys.exit(hy_main())\n", "issue": "Run REPL using `python -m hy`\nFiguring out how to get a REPL is not trivial. What do you think about making `python -m hy` do it?\n", "before_files": [{"content": "import hy # NOQA\nimport sys\n\n# This just mocks the normalish behavior of the Python interp. Helpful to aid\n# with shimming existing apps that don't really \"work\" with Hy.\n#\n# You could say this script helps Hyjack a file.\n#\n\n\nif len(sys.argv) > 1:\n sys.argv.pop(0)\n hy.importer._import_from_path('__main__', sys.argv[0])\n sys.exit(0) # right?\n", "path": "hy/__main__.py"}]}
| 694 | 199 |
gh_patches_debug_21487
|
rasdani/github-patches
|
git_diff
|
apache__tvm-14908
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ci] `last-successful` job is disabled
The job from #10056: https://github.com/apache/tvm/actions/workflows/update_last_successful_branch.yml was disabled a few months ago in concert with the change to split up the Jenkins CI job from one job into many jobs (see #13337). To fix this we'd need to update the CI checking script to know about the new job names and find the last commit in which they all passed.
cc @Mousius @areusch @gigiblender @leandron
</issue>
<code>
[start of ci/scripts/github/update_branch.py]
1 #!/usr/bin/env python3
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 import os
20 import json
21 import argparse
22 import sys
23 from pathlib import Path
24 from typing import Any, Dict
25
26 # Hackery to enable importing of utils from ci/scripts/jenkins
27 REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
28 sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
29
30 from git_utils import git, GitHubRepo, parse_remote
31
32
33 _commit_query_fields = """
34 messageHeadline
35 oid
36 statusCheckRollup {
37 contexts(last:100) {
38 nodes {
39 ... on CheckRun {
40 conclusion
41 status
42 name
43 checkSuite {
44 workflowRun {
45 workflow {
46 name
47 }
48 }
49 }
50 }
51 ... on StatusContext {
52 context
53 state
54 }
55 }
56 }
57 }
58 """
59
60
61 def commits_query(user: str, repo: str, cursor: str = None):
62 """
63 Create a GraphQL query to find the last N commits along with their statuses
64 and some metadata (paginated after 'cursor')
65 """
66 after = ""
67 if cursor is not None:
68 after = f', after:"{cursor}"'
69
70 return f"""
71 {{
72 repository(name: "{repo}", owner: "{user}") {{
73 defaultBranchRef {{
74 target {{
75 ... on Commit {{
76 history(first: 15{after}) {{
77 edges {{ cursor }}
78 nodes {{
79 {_commit_query_fields}
80 }}
81 }}
82 }}
83 }}
84 }}
85 }}
86 }}
87 """
88
89
90 def commit_passed_ci(commit: Dict[str, Any]) -> bool:
91 """
92 Returns true if all of a commit's statuses are SUCCESS
93 """
94 statuses = commit["statusCheckRollup"]["contexts"]["nodes"]
95
96 # GitHub Actions statuses are different from external GitHub statuses, so
97 # unify them into 1 representation
98 # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads
99 unified_statuses = []
100 for status in statuses:
101 if "context" in status:
102 # Parse non-GHA status
103 unified_statuses.append((status["context"], status["state"] == "SUCCESS"))
104 else:
105 # Parse GitHub Actions item
106 workflow = status["checkSuite"]["workflowRun"]["workflow"]["name"]
107 name = f"{workflow} / {status['name']}"
108 unified_statuses.append((name, status["conclusion"] == "SUCCESS"))
109
110 print(f"Statuses on {commit['oid']}:", json.dumps(unified_statuses, indent=2))
111
112 # Assert that specific jobs are present in the commit statuses (i.e. don't
113 # approve if CI was broken and didn't schedule a job)
114 expected_jobs = {"tvm-ci/branch"}
115 job_names = {name for name, status in unified_statuses}
116 for job in expected_jobs:
117 if job not in job_names:
118 # Did not find expected job name
119 return False
120
121 passed_ci = all(status for name, status in unified_statuses)
122 return passed_ci
123
124
125 def update_branch(user: str, repo: str, sha: str, branch_name: str) -> None:
126 git(["fetch", "origin", sha])
127 git(["reset", "--hard", "FETCH_HEAD"])
128 try:
129 git(["branch", "-D", branch_name])
130 except RuntimeError:
131 # Ignore failures (i.e. the branch did not exist in the first place)
132 pass
133 git(["checkout", "-b", branch_name])
134
135 # Create and push the branch
136 git(["push", "origin", "--force", branch_name])
137 print(f"Pushed branch {branch_name} with commit {sha}")
138
139
140 if __name__ == "__main__":
141 help = "Push the a branch to the last commit that passed all CI runs"
142 parser = argparse.ArgumentParser(description=help)
143 parser.add_argument("--remote", default="origin", help="ssh remote to parse")
144 parser.add_argument("--dry-run", action="store_true", help="don't submit to GitHub")
145 parser.add_argument("--branch", default="last-successful", help="branch name")
146 parser.add_argument(
147 "--testonly-json", help="(testing) data to use instead of fetching from GitHub"
148 )
149 args = parser.parse_args()
150
151 remote = git(["config", "--get", f"remote.{args.remote}.url"])
152 user, repo = parse_remote(remote)
153 # TODO: Remove this before landing
154 user, repo = ("apache", "tvm")
155
156 if args.testonly_json:
157 r = json.loads(args.testonly_json)
158 else:
159 github = GitHubRepo(token=os.environ["GITHUB_TOKEN"], user=user, repo=repo)
160 q = commits_query(user, repo)
161 r = github.graphql(q)
162
163 commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
164
165 # Limit GraphQL pagination
166 MAX_COMMITS_TO_CHECK = 50
167 i = 0
168
169 while i < MAX_COMMITS_TO_CHECK:
170 # Check each commit
171 for commit in commits:
172 if commit_passed_ci(commit):
173 print(f"Found last good commit: {commit['oid']}: {commit['messageHeadline']}")
174 if not args.dry_run:
175 update_branch(
176 user=user,
177 repo=repo,
178 sha=commit["oid"],
179 branch_name=args.branch,
180 )
181 # Nothing to do after updating the branch, exit early
182 exit(0)
183
184 # No good commit found, proceed to next page of results
185 edges = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["edges"]
186 if len(edges) == 0:
187 break
188 else:
189 q = commits_query(user, repo, cursor=edges[-1]["cursor"])
190 r = github.graphql(q)
191 commits = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]["nodes"]
192
193 # Backstop to prevent looking through all the past commits
194 i += len(commits)
195
196 print(f"No good commits found in the last {len(commits)} commits")
197 exit(1)
198
[end of ci/scripts/github/update_branch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ci/scripts/github/update_branch.py b/ci/scripts/github/update_branch.py
--- a/ci/scripts/github/update_branch.py
+++ b/ci/scripts/github/update_branch.py
@@ -87,6 +87,22 @@
"""
+EXPECTED_CI_JOBS = [
+ "cross-isa-minimal/branch",
+ "gpu/branch",
+ "hexagon/branch",
+ "arm/branch",
+ "cortexm/branch",
+ "cpu/branch",
+ "docker/branch",
+ "i386/branch",
+ "lint/branch",
+ "minimal/branch",
+ "riscv/branch",
+ "wasm/branch",
+]
+
+
def commit_passed_ci(commit: Dict[str, Any]) -> bool:
"""
Returns true if all of a commit's statuses are SUCCESS
@@ -111,9 +127,8 @@
# Assert that specific jobs are present in the commit statuses (i.e. don't
# approve if CI was broken and didn't schedule a job)
- expected_jobs = {"tvm-ci/branch"}
job_names = {name for name, status in unified_statuses}
- for job in expected_jobs:
+ for job in EXPECTED_CI_JOBS:
if job not in job_names:
# Did not find expected job name
return False
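
The design change is to replace the single aggregated `tvm-ci/branch` status with an explicit allow-list of per-job statuses: a commit qualifies only if every job in `EXPECTED_CI_JOBS` reported a status and every reported status succeeded. A standalone sketch of that gating logic, with invented job names and statuses for illustration:

```python
# Illustrative data only; real entries come from the GitHub GraphQL response.
EXPECTED_CI_JOBS = ["lint/branch", "cpu/branch", "gpu/branch"]

unified_statuses = [
    ("lint/branch", True),
    ("cpu/branch", True),
    ("gpu/branch", True),
    ("docs/branch", True),   # extra jobs are fine; missing expected jobs are not
]

job_names = {name for name, _ in unified_statuses}
missing = [job for job in EXPECTED_CI_JOBS if job not in job_names]
passed_ci = not missing and all(ok for _, ok in unified_statuses)
print(missing, passed_ci)    # [] True
```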
|
{"golden_diff": "diff --git a/ci/scripts/github/update_branch.py b/ci/scripts/github/update_branch.py\n--- a/ci/scripts/github/update_branch.py\n+++ b/ci/scripts/github/update_branch.py\n@@ -87,6 +87,22 @@\n \"\"\"\n \n \n+EXPECTED_CI_JOBS = [\n+ \"cross-isa-minimal/branch\",\n+ \"gpu/branch\",\n+ \"hexagon/branch\",\n+ \"arm/branch\",\n+ \"cortexm/branch\",\n+ \"cpu/branch\",\n+ \"docker/branch\",\n+ \"i386/branch\",\n+ \"lint/branch\",\n+ \"minimal/branch\",\n+ \"riscv/branch\",\n+ \"wasm/branch\",\n+]\n+\n+\n def commit_passed_ci(commit: Dict[str, Any]) -> bool:\n \"\"\"\n Returns true if all of a commit's statuses are SUCCESS\n@@ -111,9 +127,8 @@\n \n # Assert that specific jobs are present in the commit statuses (i.e. don't\n # approve if CI was broken and didn't schedule a job)\n- expected_jobs = {\"tvm-ci/branch\"}\n job_names = {name for name, status in unified_statuses}\n- for job in expected_jobs:\n+ for job in EXPECTED_CI_JOBS:\n if job not in job_names:\n # Did not find expected job name\n return False\n", "issue": "[ci] `last-successful` job is disabled\nThe job from #10056: https://github.com/apache/tvm/actions/workflows/update_last_successful_branch.yml was disabled a few months ago in concert with the change to split up the Jenkins CI job from one job into many jobs (see #13337). To fix this we'd need to update the CI checking script to know about the new job names and find the last commit in which they all passed.\n\ncc @Mousius @areusch @gigiblender @leandron\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport json\nimport argparse\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\n# Hackery to enable importing of utils from ci/scripts/jenkins\nREPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent\nsys.path.append(str(REPO_ROOT / \"ci\" / \"scripts\" / \"jenkins\"))\n\nfrom git_utils import git, GitHubRepo, parse_remote\n\n\n_commit_query_fields = \"\"\"\n messageHeadline\n oid\n statusCheckRollup {\n contexts(last:100) {\n nodes {\n ... on CheckRun {\n conclusion\n status\n name\n checkSuite {\n workflowRun {\n workflow {\n name\n }\n }\n }\n }\n ... on StatusContext {\n context\n state\n }\n }\n }\n }\n\"\"\"\n\n\ndef commits_query(user: str, repo: str, cursor: str = None):\n \"\"\"\n Create a GraphQL query to find the last N commits along with their statuses\n and some metadata (paginated after 'cursor')\n \"\"\"\n after = \"\"\n if cursor is not None:\n after = f', after:\"{cursor}\"'\n\n return f\"\"\"\n {{\n repository(name: \"{repo}\", owner: \"{user}\") {{\n defaultBranchRef {{\n target {{\n ... 
on Commit {{\n history(first: 15{after}) {{\n edges {{ cursor }}\n nodes {{\n {_commit_query_fields}\n }}\n }}\n }}\n }}\n }}\n }}\n }}\n \"\"\"\n\n\ndef commit_passed_ci(commit: Dict[str, Any]) -> bool:\n \"\"\"\n Returns true if all of a commit's statuses are SUCCESS\n \"\"\"\n statuses = commit[\"statusCheckRollup\"][\"contexts\"][\"nodes\"]\n\n # GitHub Actions statuses are different from external GitHub statuses, so\n # unify them into 1 representation\n # https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads\n unified_statuses = []\n for status in statuses:\n if \"context\" in status:\n # Parse non-GHA status\n unified_statuses.append((status[\"context\"], status[\"state\"] == \"SUCCESS\"))\n else:\n # Parse GitHub Actions item\n workflow = status[\"checkSuite\"][\"workflowRun\"][\"workflow\"][\"name\"]\n name = f\"{workflow} / {status['name']}\"\n unified_statuses.append((name, status[\"conclusion\"] == \"SUCCESS\"))\n\n print(f\"Statuses on {commit['oid']}:\", json.dumps(unified_statuses, indent=2))\n\n # Assert that specific jobs are present in the commit statuses (i.e. don't\n # approve if CI was broken and didn't schedule a job)\n expected_jobs = {\"tvm-ci/branch\"}\n job_names = {name for name, status in unified_statuses}\n for job in expected_jobs:\n if job not in job_names:\n # Did not find expected job name\n return False\n\n passed_ci = all(status for name, status in unified_statuses)\n return passed_ci\n\n\ndef update_branch(user: str, repo: str, sha: str, branch_name: str) -> None:\n git([\"fetch\", \"origin\", sha])\n git([\"reset\", \"--hard\", \"FETCH_HEAD\"])\n try:\n git([\"branch\", \"-D\", branch_name])\n except RuntimeError:\n # Ignore failures (i.e. the branch did not exist in the first place)\n pass\n git([\"checkout\", \"-b\", branch_name])\n\n # Create and push the branch\n git([\"push\", \"origin\", \"--force\", branch_name])\n print(f\"Pushed branch {branch_name} with commit {sha}\")\n\n\nif __name__ == \"__main__\":\n help = \"Push the a branch to the last commit that passed all CI runs\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--remote\", default=\"origin\", help=\"ssh remote to parse\")\n parser.add_argument(\"--dry-run\", action=\"store_true\", help=\"don't submit to GitHub\")\n parser.add_argument(\"--branch\", default=\"last-successful\", help=\"branch name\")\n parser.add_argument(\n \"--testonly-json\", help=\"(testing) data to use instead of fetching from GitHub\"\n )\n args = parser.parse_args()\n\n remote = git([\"config\", \"--get\", f\"remote.{args.remote}.url\"])\n user, repo = parse_remote(remote)\n # TODO: Remove this before landing\n user, repo = (\"apache\", \"tvm\")\n\n if args.testonly_json:\n r = json.loads(args.testonly_json)\n else:\n github = GitHubRepo(token=os.environ[\"GITHUB_TOKEN\"], user=user, repo=repo)\n q = commits_query(user, repo)\n r = github.graphql(q)\n\n commits = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"nodes\"]\n\n # Limit GraphQL pagination\n MAX_COMMITS_TO_CHECK = 50\n i = 0\n\n while i < MAX_COMMITS_TO_CHECK:\n # Check each commit\n for commit in commits:\n if commit_passed_ci(commit):\n print(f\"Found last good commit: {commit['oid']}: {commit['messageHeadline']}\")\n if not args.dry_run:\n update_branch(\n user=user,\n repo=repo,\n sha=commit[\"oid\"],\n branch_name=args.branch,\n )\n # Nothing to do after updating the branch, exit early\n exit(0)\n\n # No good commit found, proceed to next page of 
results\n edges = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"edges\"]\n if len(edges) == 0:\n break\n else:\n q = commits_query(user, repo, cursor=edges[-1][\"cursor\"])\n r = github.graphql(q)\n commits = r[\"data\"][\"repository\"][\"defaultBranchRef\"][\"target\"][\"history\"][\"nodes\"]\n\n # Backstop to prevent looking through all the past commits\n i += len(commits)\n\n print(f\"No good commits found in the last {len(commits)} commits\")\n exit(1)\n", "path": "ci/scripts/github/update_branch.py"}]}
| 2,634 | 307 |
gh_patches_debug_20714
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-5255
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SpatialFullConvolution loaded from Lua doesn't have finput attribute
I am new to Python. I tried to solve the problem with the help of:
https://github.com/pytorch/pytorch/issues/897
I found something confusing in this code:
https://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/JoinTable.py#L7
I set `dimension=1; self.dimension = dimension`, which seems to work for me, but I don't know how the value of `dimension` was supposed to be initialized.
Thank you!
</issue>
<code>
[start of torch/legacy/nn/SpatialFullConvolution.py]
1 import math
2 import torch
3 from .Module import Module
4 from .utils import clear
5
6
7 class SpatialFullConvolution(Module):
8
9 def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, adjW=0, adjH=0):
10 super(SpatialFullConvolution, self).__init__()
11
12 self.nInputPlane = nInputPlane
13 self.nOutputPlane = nOutputPlane
14 self.kW = kW
15 self.kH = kH
16 self.dW = dW
17 self.dH = dH
18 self.padW = padW
19 self.padH = padH if padH is not None else padW
20 self.adjW = adjW
21 self.adjH = adjH
22
23 if self.adjW > self.dW - 1 or self.adjH > self.dH - 1:
24 raise ValueError('adjW and adjH must be smaller than self.dW - 1 and self.dH - 1 respectively')
25
26 self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)
27 self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)
28 self.bias = torch.Tensor(self.nOutputPlane)
29 self.gradBias = torch.Tensor(self.nOutputPlane)
30
31 self.ones = torch.Tensor()
32 self.finput = None
33 self.fgradInput = None
34 self.zeroScalar = None
35 self._gradOutput = None
36
37 self.reset()
38
39 def noBias(self):
40 self.bias = None
41 self.gradBias = None
42 return self
43
44 def reset(self, stdv=None):
45 if stdv is not None:
46 stdv = stdv * math.sqrt(3)
47 else:
48 nInputPlane = self.nInputPlane
49 kH = self.kH
50 kW = self.kW
51 stdv = 1 / math.sqrt(kW * kH * nInputPlane)
52
53 self.weight.uniform_(-stdv, stdv)
54 if self.bias is not None:
55 self.bias.uniform_(-stdv, stdv)
56
57 def _makeContiguous(self, input, gradOutput=None):
58 if not input.is_contiguous():
59 if self._input is None:
60 self._input = input.new()
61 self._input.resize_as_(input).copy_(input)
62 input = self._input
63
64 if gradOutput is not None:
65 if not gradOutput.is_contiguous():
66 if self._gradOutput is None:
67 self._gradOutput = gradOutput.new()
68 self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)
69 gradOutput = self._gradOutput
70 return input, gradOutput
71
72 return input
73
74 def _calculateAdj(self, targetSize, ker, pad, stride):
75 return (targetSize + 2 * pad - ker) % stride
76
77 def updateOutput(self, input):
78 inputTensor = input
79 adjW, adjH = self.adjW, self.adjH
80
81 # The input can be a table where the second element indicates the target
82 # output size, in which case the adj factors are computed automatically
83 if isinstance(input, list):
84 inputTensor = input[0]
85 targetTensor = input[1]
86 tDims = targetTensor.dim()
87 tH = targetTensor.size(tDims - 2)
88 tW = targetTensor.size(tDims - 1)
89 adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
90 adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
91 if self.finput is None:
92 self.finput = input[0].new()
93 if self.fgradInput is None:
94 self.fgradInput = input[0].new()
95 else:
96 if self.finput is None:
97 self.finput = input.new()
98 if self.fgradInput is None:
99 self.fgradInput = input.new()
100
101 inputTensor = self._makeContiguous(inputTensor)
102 self._backend.SpatialFullConvolution_updateOutput(
103 self._backend.library_state,
104 inputTensor,
105 self.output,
106 self.weight,
107 self.bias,
108 self.finput,
109 self.fgradInput,
110 self.kW, self.kH,
111 self.dW, self.dH,
112 self.padW, self.padH,
113 adjW, adjH
114 )
115 return self.output
116
117 def updateGradInput(self, input, gradOutput):
118 if self.gradInput is None:
119 return
120 inputTensor = input
121 adjW, adjH = self.adjW, self.adjH
122
123 # The input can be a table where the second element indicates the target
124 # output size, in which case the adj factors are computed automatically
125 if isinstance(input, list):
126 inputTensor = input[0]
127 targetTensor = input[1]
128 tDims = targetTensor.dim()
129 tH = targetTensor.size(tDims - 2)
130 tW = targetTensor.size(tDims - 1)
131 adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
132 adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
133 # Momentarily extract the gradInput tensor
134 if isinstance(self.gradInput, list):
135 self.gradInput = self.gradInput[0]
136
137 inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
138 self._backend.SpatialFullConvolution_updateGradInput(
139 self._backend.library_state,
140 inputTensor,
141 gradOutput,
142 self.gradInput,
143 self.weight,
144 self.finput,
145 self.kW, self.kH,
146 self.dW, self.dH,
147 self.padW, self.padH,
148 adjW, adjH
149 )
150
151 if isinstance(input, list):
152 # Create a zero tensor to be expanded and used as gradInput[1].
153 if self.zeroScalar is None:
154 self.zeroScalar = input[1].new(1).zero_()
155 self.ones.resize_(input[1].dim()).fill_(1)
156 zeroTensor = self.zeroScalar.view_as(self.ones).expand_as(input[1])
157 self.gradInput = [self.gradInput, zeroTensor]
158
159 return self.gradInput
160
161 def accGradParameters(self, input, gradOutput, scale=1):
162 inputTensor = input
163 adjW, adjH = self.adjW, self.adjH
164
165 # The input can be a table where the second element indicates the target
166 # output size, in which case the adj factors are computed automatically
167 if isinstance(inputTensor, list):
168 inputTensor = input[0]
169 targetTensor = input[1]
170 tDims = targetTensor.dim()
171 tH = targetTensor.size(tDims - 2)
172 tW = targetTensor.size(tDims - 1)
173 adjW = calculateAdj(tW, self.kW, self.padW, self.dW)
174 adjH = calculateAdj(tH, self.kH, self.padH, self.dH)
175
176 inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)
177 self._backend.SpatialFullConvolution_accGradParameters(
178 self._backend.library_state,
179 inputTensor,
180 gradOutput,
181 self.gradWeight,
182 self.gradBias,
183 self.finput,
184 self.fgradInput,
185 self.kW, self.kH,
186 self.dW, self.dH,
187 self.padW, self.padH,
188 adjW, adjH,
189 scale
190 )
191
192 def type(self, type=None, tensorCache=None):
193 if self.finput is not None:
194 self.finput = torch.Tensor()
195 if self.fgradInput is not None:
196 self.fgradInput = torch.Tensor()
197 return super(SpatialFullConvolution, self).type(type, tensorCache)
198
199 def __repr__(self):
200 s = super(SpatialFullConvolution, self).__repr__()
201 s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)
202 if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:
203 s += ', {}, {}'.format(self.dW, self.dH)
204
205 if (self.padW or self.padH) and (self.padW != 0 or self.padH != 0):
206 s += ', {}, {}'.format(self.padW, self.padH)
207
208 if (self.adjW or self.adjH) and (self.adjW != 0 or self.adjH != 0):
209 s += ', {}, {}'.format(self.adjW, self.adjH)
210
211 s += ')'
212 if self.bias is None:
213 s += ' without bias'
214 return s
215
216 def clearState(self):
217 clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')
218 return super(SpatialFullConvolution, self).clearState()
219
[end of torch/legacy/nn/SpatialFullConvolution.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torch/legacy/nn/SpatialFullConvolution.py b/torch/legacy/nn/SpatialFullConvolution.py
--- a/torch/legacy/nn/SpatialFullConvolution.py
+++ b/torch/legacy/nn/SpatialFullConvolution.py
@@ -88,14 +88,14 @@
tW = targetTensor.size(tDims - 1)
adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)
adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)
- if self.finput is None:
+ if not hasattr(self, 'finput') or self.finput is None:
self.finput = input[0].new()
- if self.fgradInput is None:
+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:
self.fgradInput = input[0].new()
else:
- if self.finput is None:
+ if not hasattr(self, 'finput') or self.finput is None:
self.finput = input.new()
- if self.fgradInput is None:
+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:
self.fgradInput = input.new()
inputTensor = self._makeContiguous(inputTensor)
|
{"golden_diff": "diff --git a/torch/legacy/nn/SpatialFullConvolution.py b/torch/legacy/nn/SpatialFullConvolution.py\n--- a/torch/legacy/nn/SpatialFullConvolution.py\n+++ b/torch/legacy/nn/SpatialFullConvolution.py\n@@ -88,14 +88,14 @@\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n- if self.finput is None:\n+ if not hasattr(self, 'finput') or self.finput is None:\n self.finput = input[0].new()\n- if self.fgradInput is None:\n+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:\n self.fgradInput = input[0].new()\n else:\n- if self.finput is None:\n+ if not hasattr(self, 'finput') or self.finput is None:\n self.finput = input.new()\n- if self.fgradInput is None:\n+ if not hasattr(self, 'fgradInput') or self.fgradInput is None:\n self.fgradInput = input.new()\n \n inputTensor = self._makeContiguous(inputTensor)\n", "issue": "SpatialFullConvolution loaded from Lua doesn't have finput attribute\nI am new to python\uff0cwhen i solve the promblem with the help below\uff1a\r\nhttps://github.com/pytorch/pytorch/issues/897\r\n\r\nI find some confusion in the code\uff1a\r\nhttps://github.com/pytorch/pytorch/blob/master/torch/legacy/nn/JoinTable.py#L7\r\n\r\nI set \u2018dimension=1\uff1bself.dimension = dimension\u2019\uff0cit seem ok for me\uff0cbut i don\u2019t kown how the value of \u2019dimension\u2018 was initialled. \r\n\r\nThank you !\n", "before_files": [{"content": "import math\nimport torch\nfrom .Module import Module\nfrom .utils import clear\n\n\nclass SpatialFullConvolution(Module):\n\n def __init__(self, nInputPlane, nOutputPlane, kW, kH, dW=1, dH=1, padW=0, padH=None, adjW=0, adjH=0):\n super(SpatialFullConvolution, self).__init__()\n\n self.nInputPlane = nInputPlane\n self.nOutputPlane = nOutputPlane\n self.kW = kW\n self.kH = kH\n self.dW = dW\n self.dH = dH\n self.padW = padW\n self.padH = padH if padH is not None else padW\n self.adjW = adjW\n self.adjH = adjH\n\n if self.adjW > self.dW - 1 or self.adjH > self.dH - 1:\n raise ValueError('adjW and adjH must be smaller than self.dW - 1 and self.dH - 1 respectively')\n\n self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)\n self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW)\n self.bias = torch.Tensor(self.nOutputPlane)\n self.gradBias = torch.Tensor(self.nOutputPlane)\n\n self.ones = torch.Tensor()\n self.finput = None\n self.fgradInput = None\n self.zeroScalar = None\n self._gradOutput = None\n\n self.reset()\n\n def noBias(self):\n self.bias = None\n self.gradBias = None\n return self\n\n def reset(self, stdv=None):\n if stdv is not None:\n stdv = stdv * math.sqrt(3)\n else:\n nInputPlane = self.nInputPlane\n kH = self.kH\n kW = self.kW\n stdv = 1 / math.sqrt(kW * kH * nInputPlane)\n\n self.weight.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.uniform_(-stdv, stdv)\n\n def _makeContiguous(self, input, gradOutput=None):\n if not input.is_contiguous():\n if self._input is None:\n self._input = input.new()\n self._input.resize_as_(input).copy_(input)\n input = self._input\n\n if gradOutput is not None:\n if not gradOutput.is_contiguous():\n if self._gradOutput is None:\n self._gradOutput = gradOutput.new()\n self._gradOutput.resize_as_(gradOutput).copy_(gradOutput)\n gradOutput = self._gradOutput\n return input, gradOutput\n\n return input\n\n def _calculateAdj(self, targetSize, ker, pad, stride):\n return (targetSize + 2 * pad - ker) % stride\n\n def updateOutput(self, 
input):\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(input, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n if self.finput is None:\n self.finput = input[0].new()\n if self.fgradInput is None:\n self.fgradInput = input[0].new()\n else:\n if self.finput is None:\n self.finput = input.new()\n if self.fgradInput is None:\n self.fgradInput = input.new()\n\n inputTensor = self._makeContiguous(inputTensor)\n self._backend.SpatialFullConvolution_updateOutput(\n self._backend.library_state,\n inputTensor,\n self.output,\n self.weight,\n self.bias,\n self.finput,\n self.fgradInput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH\n )\n return self.output\n\n def updateGradInput(self, input, gradOutput):\n if self.gradInput is None:\n return\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(input, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = self._calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = self._calculateAdj(tH, self.kH, self.padH, self.dH)\n # Momentarily extract the gradInput tensor\n if isinstance(self.gradInput, list):\n self.gradInput = self.gradInput[0]\n\n inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)\n self._backend.SpatialFullConvolution_updateGradInput(\n self._backend.library_state,\n inputTensor,\n gradOutput,\n self.gradInput,\n self.weight,\n self.finput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH\n )\n\n if isinstance(input, list):\n # Create a zero tensor to be expanded and used as gradInput[1].\n if self.zeroScalar is None:\n self.zeroScalar = input[1].new(1).zero_()\n self.ones.resize_(input[1].dim()).fill_(1)\n zeroTensor = self.zeroScalar.view_as(self.ones).expand_as(input[1])\n self.gradInput = [self.gradInput, zeroTensor]\n\n return self.gradInput\n\n def accGradParameters(self, input, gradOutput, scale=1):\n inputTensor = input\n adjW, adjH = self.adjW, self.adjH\n\n # The input can be a table where the second element indicates the target\n # output size, in which case the adj factors are computed automatically\n if isinstance(inputTensor, list):\n inputTensor = input[0]\n targetTensor = input[1]\n tDims = targetTensor.dim()\n tH = targetTensor.size(tDims - 2)\n tW = targetTensor.size(tDims - 1)\n adjW = calculateAdj(tW, self.kW, self.padW, self.dW)\n adjH = calculateAdj(tH, self.kH, self.padH, self.dH)\n\n inputTensor, gradOutput = self._makeContiguous(inputTensor, gradOutput)\n self._backend.SpatialFullConvolution_accGradParameters(\n self._backend.library_state,\n inputTensor,\n gradOutput,\n self.gradWeight,\n self.gradBias,\n self.finput,\n self.fgradInput,\n self.kW, self.kH,\n self.dW, self.dH,\n self.padW, self.padH,\n adjW, adjH,\n scale\n )\n\n def type(self, type=None, tensorCache=None):\n if self.finput is not None:\n self.finput = torch.Tensor()\n if self.fgradInput 
is not None:\n self.fgradInput = torch.Tensor()\n return super(SpatialFullConvolution, self).type(type, tensorCache)\n\n def __repr__(self):\n s = super(SpatialFullConvolution, self).__repr__()\n s += '({} -> {}, {}x{}'.format(self.nInputPlane, self.nOutputPlane, self.kW, self.kH)\n if self.dW != 1 or self.dH != 1 or self.padW != 0 or self.padH != 0:\n s += ', {}, {}'.format(self.dW, self.dH)\n\n if (self.padW or self.padH) and (self.padW != 0 or self.padH != 0):\n s += ', {}, {}'.format(self.padW, self.padH)\n\n if (self.adjW or self.adjH) and (self.adjW != 0 or self.adjH != 0):\n s += ', {}, {}'.format(self.adjW, self.adjH)\n\n s += ')'\n if self.bias is None:\n s += ' without bias'\n return s\n\n def clearState(self):\n clear(self, 'finput', 'fgradInput', '_input', '_gradOutput')\n return super(SpatialFullConvolution, self).clearState()\n", "path": "torch/legacy/nn/SpatialFullConvolution.py"}]}
| 3,197 | 303 |
gh_patches_debug_10600
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1162
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception BO
feeder_electricity_1 | Exception while fetching production of BO
feeder_electricity_1 | Traceback (most recent call last):
feeder_electricity_1 | File "feeder_electricity.py", line 111, in fetch_production
feeder_electricity_1 | objs = parser(country_code, session)
feeder_electricity_1 | File "/home/feeder/lib_electricitymap/parsers/BO.py", line 149, in fetch_production
feeder_electricity_1 | sum(datapoint['production'].values()) != 0.0]):
feeder_electricity_1 | AttributeError: 'NoneType' object has no attribute 'values'
</issue>
<code>
[start of parsers/BO.py]
1 #!/usr/bin/env python3
2
3 # The arrow library is used to handle datetimes
4 import arrow
5 # The request library is used to fetch content through HTTP
6 import requests
7 # The numpy and pandas libraries are used to manipulate real time data
8 import pandas as pd
9 # The BeautifulSoup library is used parse web html
10 from bs4 import BeautifulSoup
11
12 tz_bo = 'America/La_Paz'
13
14 MAP_GENERATION = {
15 'hydro': 'Hidro',
16 'unknown': 'Termo',
17 'wind': 'Intermitentes'
18 }
19
20
21 def webparser(resp):
22 """Takes content from the corresponding webpage and returns the necessary outputs in a dataframe"""
23 # get the response as an html
24 soup = BeautifulSoup(resp.text, 'html.parser')
25 # Each variable correspond to a row
26 rows = soup.find_all("row")
27 # Extract the name of variables and position
28 variables = []
29 corresponding_row = []
30 hours = []
31 for i_row in range(len(rows)):
32 for tag in rows[i_row].find_all("string"):
33 if not tag.get_text().isdigit():
34 variables.append(tag.get_text().strip())
35 corresponding_row.append(i_row)
36 else:
37 hours.append(int(tag.get_text()))
38 # Define output frame
39 obj = pd.DataFrame(0, index=range(24), columns=['hour'] + variables)
40 # Fill it with hours and variables' value
41 obj.hour = hours
42 for i_row, row in enumerate(corresponding_row):
43 numbers = [float(numb.text) for numb in rows[row].find_all("number")]
44 for i_num, num in enumerate(numbers):
45 obj.loc[i_num, (variables[i_row])] = num
46 # Define negative values to NaN
47 obj[obj < 0] = 0
48
49 return obj
50
51
52 def fetch_hourly_production(country_code, obj, date):
53 """Returns a list of dictionaries."""
54
55 production_by_hour = []
56 for index, row in obj.iterrows():
57
58 data = {
59 'countryCode': country_code,
60 'production': {},
61 'storage': {},
62 'source': 'cndc.bo',
63 }
64 # Fill datetime variable
65 # Datetime are recorded from hour 1 to 24 in the web service
66 if row['hour'] == 24:
67 row['hour'] = 0
68 date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')
69 # date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')
70 data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,
71 hour=int(row['hour'])).datetime
72
73 # Fill production types
74 for i_type in MAP_GENERATION.keys():
75 try:
76 data['production'][i_type] = row[MAP_GENERATION[i_type]]
77 except KeyError as e:
78 data['production'] = None
79 break
80
81 production_by_hour.append(data)
82
83 return production_by_hour
84
85
86 def fetch_production(country_code='BO', session=None):
87 """
88 Requests the last known production mix (in MW) of a given country
89 Arguments:
90 country_code (optional) -- used in case a parser is able to fetch multiple countries
91 Return:
92 A dictionary in the form:
93 {
94 'countryCode': 'FR',
95 'datetime': '2017-01-01T00:00:00Z',
96 'production': {
97 'biomass': 0.0,
98 'coal': 0.0,
99 'gas': 0.0,
100 'hydro': 0.0,
101 'nuclear': null,
102 'oil': 0.0,
103 'solar': 0.0,
104 'wind': 0.0,
105 'geothermal': 0.0,
106 'unknown': 0.0
107 },
108 'storage': {
109 'hydro': -10.0,
110 },
111 'source': 'mysource.com'
112 }
113 """
114
115 # Define actual and previous day (for midnight data).
116 now = arrow.now(tz=tz_bo)
117 formatted_date = now.format('YYYY-MM-DD')
118 past_formatted_date = arrow.get(formatted_date, 'YYYY-MM-DD').shift(days=-1).format(
119 'YYYY-MM-DD')
120
121 # initial path for url to request
122 url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='
123
124 # Start with data for previous day in order to get midnight data.
125 url = url_init + past_formatted_date
126 r = session or requests.session()
127 response = r.get(url)
128 obj = webparser(response)
129 data_yesterday = fetch_hourly_production(country_code, obj, past_formatted_date)
130
131 # Now get data for rest of today.
132 url = url_init + formatted_date
133 r = session or requests.session()
134 response = r.get(url)
135 obj = webparser(response)
136 data_today = fetch_hourly_production(country_code, obj, formatted_date)
137
138 data = data_yesterday + data_today
139
140 # Drop any datapoints where;
141 # 1) A type of generation is totally missing resulting in None.
142 # 2) Datapoint is in the future.
143 # 3) All production values are zero, this can happen because the data source
144 # updates ~5mins after the hour so condition 2 will pass.
145 valid_data = []
146 for datapoint in data:
147 if all([datapoint['production'] is not None,
148 now.datetime > datapoint['datetime'],
149 sum(datapoint['production'].values()) != 0.0]):
150 valid_data.append(datapoint)
151
152 return valid_data
153
154
155 def fetch_hourly_generation_forecast(country_code, obj, date):
156 """Returns a list of dictionaries."""
157
158 hourly_forecast = []
159 for index, row in obj.iterrows():
160 data = {
161 'countryCode': country_code,
162 'value': {},
163 'source': 'cndc.bo',
164 }
165
166 # Fill forecasted value
167 data['value'] = row['Gen.Prevista']
168
169 # Fill datetime variable - changing format if midnight (datetime are recorded from hour 1 to 24 in the webservice)
170 if row['hour'] == 24:
171 row['hour'] = 0
172 date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')
173 data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,
174 hour=int(row['hour'])).datetime
175
176 hourly_forecast.append(data)
177
178 return hourly_forecast
179
180
181 def fetch_generation_forecast(country_code='BO', session=None):
182 # Define actual and last day (for midnight data)
183 formatted_date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')
184
185 # initial path for url to request
186 url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='
187 url = url_init + formatted_date
188
189 r = session or requests.session()
190 response = r.get(url)
191 obj = webparser(response)
192 forecast = fetch_hourly_generation_forecast('BO', obj, formatted_date)
193
194 return forecast
195
196
197 if __name__ == '__main__':
198 """Main method, never used by the Electricity Map backend, but handy for testing."""
199 print('fetch_production() ->')
200 print(fetch_production())
201 print('fetch_generation_forecast() ->')
202 print(fetch_generation_forecast())
203
[end of parsers/BO.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/BO.py b/parsers/BO.py
--- a/parsers/BO.py
+++ b/parsers/BO.py
@@ -144,9 +144,13 @@
# updates ~5mins after the hour so condition 2 will pass.
valid_data = []
for datapoint in data:
- if all([datapoint['production'] is not None,
- now.datetime > datapoint['datetime'],
- sum(datapoint['production'].values()) != 0.0]):
+ if datapoint['production'] is None:
+ continue
+ elif now.datetime < datapoint['datetime']:
+ continue
+ elif sum(datapoint['production'].values()) == 0.0:
+ continue
+ else:
valid_data.append(datapoint)
return valid_data
|
{"golden_diff": "diff --git a/parsers/BO.py b/parsers/BO.py\n--- a/parsers/BO.py\n+++ b/parsers/BO.py\n@@ -144,9 +144,13 @@\n # updates ~5mins after the hour so condition 2 will pass.\n valid_data = []\n for datapoint in data:\n- if all([datapoint['production'] is not None,\n- now.datetime > datapoint['datetime'],\n- sum(datapoint['production'].values()) != 0.0]):\n+ if datapoint['production'] is None:\n+ continue\n+ elif now.datetime < datapoint['datetime']:\n+ continue\n+ elif sum(datapoint['production'].values()) == 0.0:\n+ continue\n+ else:\n valid_data.append(datapoint)\n \n return valid_data\n", "issue": "Exception BO\nfeeder_electricity_1 | Exception while fetching production of BO\r\nfeeder_electricity_1 | Traceback (most recent call last):\r\nfeeder_electricity_1 | File \"feeder_electricity.py\", line 111, in fetch_production\r\nfeeder_electricity_1 | objs = parser(country_code, session)\r\nfeeder_electricity_1 | File \"/home/feeder/lib_electricitymap/parsers/BO.py\", line 149, in fetch_production\r\nfeeder_electricity_1 | sum(datapoint['production'].values()) != 0.0]):\r\nfeeder_electricity_1 | AttributeError: 'NoneType' object has no attribute 'values'\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# The arrow library is used to handle datetimes\nimport arrow\n# The request library is used to fetch content through HTTP\nimport requests\n# The numpy and pandas libraries are used to manipulate real time data\nimport pandas as pd\n# The BeautifulSoup library is used parse web html\nfrom bs4 import BeautifulSoup\n\ntz_bo = 'America/La_Paz'\n\nMAP_GENERATION = {\n 'hydro': 'Hidro',\n 'unknown': 'Termo',\n 'wind': 'Intermitentes'\n}\n\n\ndef webparser(resp):\n \"\"\"Takes content from the corresponding webpage and returns the necessary outputs in a dataframe\"\"\"\n # get the response as an html\n soup = BeautifulSoup(resp.text, 'html.parser')\n # Each variable correspond to a row\n rows = soup.find_all(\"row\")\n # Extract the name of variables and position\n variables = []\n corresponding_row = []\n hours = []\n for i_row in range(len(rows)):\n for tag in rows[i_row].find_all(\"string\"):\n if not tag.get_text().isdigit():\n variables.append(tag.get_text().strip())\n corresponding_row.append(i_row)\n else:\n hours.append(int(tag.get_text()))\n # Define output frame\n obj = pd.DataFrame(0, index=range(24), columns=['hour'] + variables)\n # Fill it with hours and variables' value\n obj.hour = hours\n for i_row, row in enumerate(corresponding_row):\n numbers = [float(numb.text) for numb in rows[row].find_all(\"number\")]\n for i_num, num in enumerate(numbers):\n obj.loc[i_num, (variables[i_row])] = num\n # Define negative values to NaN\n obj[obj < 0] = 0\n\n return obj\n\n\ndef fetch_hourly_production(country_code, obj, date):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n production_by_hour = []\n for index, row in obj.iterrows():\n\n data = {\n 'countryCode': country_code,\n 'production': {},\n 'storage': {},\n 'source': 'cndc.bo',\n }\n # Fill datetime variable\n # Datetime are recorded from hour 1 to 24 in the web service\n if row['hour'] == 24:\n row['hour'] = 0\n date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')\n # date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')\n data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,\n hour=int(row['hour'])).datetime\n\n # Fill production types\n for i_type in MAP_GENERATION.keys():\n try:\n data['production'][i_type] = row[MAP_GENERATION[i_type]]\n except KeyError as e:\n 
data['production'] = None\n break\n\n production_by_hour.append(data)\n\n return production_by_hour\n\n\ndef fetch_production(country_code='BO', session=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n country_code (optional) -- used in case a parser is able to fetch multiple countries\n Return:\n A dictionary in the form:\n {\n 'countryCode': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n # Define actual and previous day (for midnight data).\n now = arrow.now(tz=tz_bo)\n formatted_date = now.format('YYYY-MM-DD')\n past_formatted_date = arrow.get(formatted_date, 'YYYY-MM-DD').shift(days=-1).format(\n 'YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='\n\n # Start with data for previous day in order to get midnight data.\n url = url_init + past_formatted_date\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n data_yesterday = fetch_hourly_production(country_code, obj, past_formatted_date)\n\n # Now get data for rest of today.\n url = url_init + formatted_date\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n data_today = fetch_hourly_production(country_code, obj, formatted_date)\n\n data = data_yesterday + data_today\n\n # Drop any datapoints where;\n # 1) A type of generation is totally missing resulting in None.\n # 2) Datapoint is in the future.\n # 3) All production values are zero, this can happen because the data source\n # updates ~5mins after the hour so condition 2 will pass.\n valid_data = []\n for datapoint in data:\n if all([datapoint['production'] is not None,\n now.datetime > datapoint['datetime'],\n sum(datapoint['production'].values()) != 0.0]):\n valid_data.append(datapoint)\n\n return valid_data\n\n\ndef fetch_hourly_generation_forecast(country_code, obj, date):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n hourly_forecast = []\n for index, row in obj.iterrows():\n data = {\n 'countryCode': country_code,\n 'value': {},\n 'source': 'cndc.bo',\n }\n\n # Fill forecasted value\n data['value'] = row['Gen.Prevista']\n\n # Fill datetime variable - changing format if midnight (datetime are recorded from hour 1 to 24 in the webservice)\n if row['hour'] == 24:\n row['hour'] = 0\n date = arrow.get(date, 'YYYY-MM-DD').shift(days=+1).format('YYYY-MM-DD')\n data['datetime'] = arrow.get(date, 'YYYY-MM-DD').replace(tzinfo=tz_bo,\n hour=int(row['hour'])).datetime\n\n hourly_forecast.append(data)\n\n return hourly_forecast\n\n\ndef fetch_generation_forecast(country_code='BO', session=None):\n # Define actual and last day (for midnight data)\n formatted_date = arrow.now(tz=tz_bo).format('YYYY-MM-DD')\n\n # initial path for url to request\n url_init = 'http://www.cndc.bo/media/archivos/graf/gene_hora/despacho_diario.php?fechag='\n url = url_init + formatted_date\n\n r = session or requests.session()\n response = r.get(url)\n obj = webparser(response)\n forecast = fetch_hourly_generation_forecast('BO', obj, formatted_date)\n\n return forecast\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n print('fetch_production() ->')\n 
print(fetch_production())\n print('fetch_generation_forecast() ->')\n print(fetch_generation_forecast())\n", "path": "parsers/BO.py"}]}
| 2,870 | 187 |
gh_patches_debug_1702
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-331
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Figure out what to do with http_service on CI
We have two options:
- build the http_service with fake models and don't push it on CI. Build it with real models and push it after training;
- build the http_service without models and let it download models at runtime.
</issue>
<code>
[start of http_service/check_models.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import logging
7 import os
8 import sys
9
10 from bugbug.models.component import ComponentModel
11 from bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel
12 from bugbug.models.regression import RegressionModel
13
14 logging.basicConfig(level=logging.INFO)
15 LOGGER = logging.getLogger()
16
17 MODELS = {
18 "defectenhancementtask": DefectEnhancementTaskModel,
19 "component": ComponentModel,
20 "regression": RegressionModel,
21 }
22 MODELS_DIR = os.path.join(os.path.dirname(__file__), "models")
23
24
25 def load_model(model):
26 model_file_path = os.path.join(MODELS_DIR, f"{model}model")
27 LOGGER.info(f"Lookup model in {model_file_path}")
28 model = MODELS[model].load(model_file_path)
29 return model
30
31
32 def check_models():
33 for model_name in MODELS.keys():
34 # Try loading the model
35 load_model(model_name)
36
37
38 if __name__ == "__main__":
39 try:
40 check_models()
41 except Exception:
42 LOGGER.warning(
43 "Failed to validate the models, please run `python models.py download`",
44 exc_info=True,
45 )
46 sys.exit(1)
47
[end of http_service/check_models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/http_service/check_models.py b/http_service/check_models.py
--- a/http_service/check_models.py
+++ b/http_service/check_models.py
@@ -36,6 +36,13 @@
if __name__ == "__main__":
+
+ should_check_models = os.environ.get("CHECK_MODELS", "1")
+
+ if should_check_models == "0":
+ print("Skipping checking models as instructed by env var $CHECK_MODELS")
+ sys.exit(0)
+
try:
check_models()
except Exception:
|
{"golden_diff": "diff --git a/http_service/check_models.py b/http_service/check_models.py\n--- a/http_service/check_models.py\n+++ b/http_service/check_models.py\n@@ -36,6 +36,13 @@\n \n \n if __name__ == \"__main__\":\n+\n+ should_check_models = os.environ.get(\"CHECK_MODELS\", \"1\")\n+\n+ if should_check_models == \"0\":\n+ print(\"Skipping checking models as instructed by env var $CHECK_MODELS\")\n+ sys.exit(0)\n+\n try:\n check_models()\n except Exception:\n", "issue": "Figure out what to do with http_service on CI\nWe have two options:\r\n- build the http_service with fake models and don't push it on CI. Build it with real models and push it after training;\r\n- build the http_service without models and let it download models at runtime.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nimport sys\n\nfrom bugbug.models.component import ComponentModel\nfrom bugbug.models.defect_enhancement_task import DefectEnhancementTaskModel\nfrom bugbug.models.regression import RegressionModel\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger()\n\nMODELS = {\n \"defectenhancementtask\": DefectEnhancementTaskModel,\n \"component\": ComponentModel,\n \"regression\": RegressionModel,\n}\nMODELS_DIR = os.path.join(os.path.dirname(__file__), \"models\")\n\n\ndef load_model(model):\n model_file_path = os.path.join(MODELS_DIR, f\"{model}model\")\n LOGGER.info(f\"Lookup model in {model_file_path}\")\n model = MODELS[model].load(model_file_path)\n return model\n\n\ndef check_models():\n for model_name in MODELS.keys():\n # Try loading the model\n load_model(model_name)\n\n\nif __name__ == \"__main__\":\n try:\n check_models()\n except Exception:\n LOGGER.warning(\n \"Failed to validate the models, please run `python models.py download`\",\n exc_info=True,\n )\n sys.exit(1)\n", "path": "http_service/check_models.py"}]}
| 990 | 119 |
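The golden diff for this row gates the model check behind a `CHECK_MODELS` environment variable so a CI image build can skip it. A self-contained sketch of that pattern is shown below; the variable name and skip message come from the diff, while the `check_models` body is a placeholder.

```python
import os
import sys

def check_models() -> None:
    # Placeholder: the real function tries to load each model file from disk.
    ...

if __name__ == "__main__":
    # CHECK_MODELS=0 lets a CI build (with fake or missing models) skip the check.
    if os.environ.get("CHECK_MODELS", "1") == "0":
        print("Skipping checking models as instructed by env var $CHECK_MODELS")
        sys.exit(0)
    try:
        check_models()
    except Exception:
        sys.exit(1)
```

Defaulting the variable to "1" keeps the existing behaviour for every caller that does not set it.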
gh_patches_debug_26712
|
rasdani/github-patches
|
git_diff
|
dask__distributed-8447
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`distributed.protocol.dumps` does not handle objects not serializable via `msgpack.dumps` as suggested by docstring
When using `distributed.protocol.dumps` to serialize arbitrary data, it fails and raises a `TypeError`. The docstring suggests it should be able to handle this.
**Minimal Complete Verifiable Example**:
```python
import numpy as np
from distributed.protocol import dumps
dumps(np.int64(123))
```
raises
```python
2024-01-09 19:55:04,572 - distributed.protocol.core - CRITICAL - Failed to Serialize
Traceback (most recent call last):
File "/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py", line 108, in dumps
frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py", line 36, in packb
return Packer(**kwargs).pack(o)
^^^^^^^^^^^^^^^^^^^^^^^^
File "msgpack/_packer.pyx", line 294, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 300, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 297, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 291, in msgpack._cmsgpack.Packer._pack
TypeError: can not serialize 'numpy.int64' object
Traceback (most recent call last):
File "/Users/hendrikmakait/projects/dask/distributed/sandbox.py", line 5, in <module>
dumps(np.int64(123))
File "/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py", line 108, in dumps
frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py", line 36, in packb
return Packer(**kwargs).pack(o)
^^^^^^^^^^^^^^^^^^^^^^^^
File "msgpack/_packer.pyx", line 294, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 300, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 297, in msgpack._cmsgpack.Packer.pack
File "msgpack/_packer.pyx", line 291, in msgpack._cmsgpack.Packer._pack
TypeError: can not serialize 'numpy.int64' object
```
Relevant section of `dumps`'s docstring: https://github.com/dask/distributed/blob/7562f9c566978de4f3f5b73920a24ea1813d6e28/distributed/protocol/core.py#L30-L35
</issue>
<code>
[start of distributed/protocol/core.py]
1 from __future__ import annotations
2
3 import logging
4
5 import msgpack
6
7 from distributed.protocol import pickle
8 from distributed.protocol.compression import decompress, maybe_compress
9 from distributed.protocol.serialize import (
10 Pickled,
11 Serialize,
12 Serialized,
13 ToPickle,
14 merge_and_deserialize,
15 msgpack_decode_default,
16 msgpack_encode_default,
17 serialize_and_split,
18 )
19 from distributed.protocol.utils import msgpack_opts
20 from distributed.utils import ensure_memoryview
21
22 logger = logging.getLogger(__name__)
23
24
25 def dumps( # type: ignore[no-untyped-def]
26 msg, serializers=None, on_error="message", context=None, frame_split_size=None
27 ) -> list:
28 """Transform Python message to bytestream suitable for communication
29
30 Developer Notes
31 ---------------
32 The approach here is to use `msgpack.dumps()` to serialize `msg` and
33 write the result to the first output frame. If `msgpack.dumps()`
34 encounters an object it cannot serialize like a NumPy array, it is handled
35 out-of-band by `_encode_default()` and appended to the output frame list.
36 """
37 try:
38 if context and "compression" in context:
39 compress_opts = {"compression": context["compression"]}
40 else:
41 compress_opts = {}
42
43 def _inplace_compress_frames(header, frames):
44 compression = list(header.get("compression", [None] * len(frames)))
45
46 for i in range(len(frames)):
47 if compression[i] is None:
48 compression[i], frames[i] = maybe_compress(
49 frames[i], **compress_opts
50 )
51
52 header["compression"] = tuple(compression)
53
54 def create_serialized_sub_frames(obj: Serialized | Serialize) -> list:
55 if isinstance(obj, Serialized):
56 sub_header, sub_frames = obj.header, obj.frames
57 else:
58 sub_header, sub_frames = serialize_and_split(
59 obj,
60 serializers=serializers,
61 on_error=on_error,
62 context=context,
63 size=frame_split_size,
64 )
65 _inplace_compress_frames(sub_header, sub_frames)
66 sub_header["num-sub-frames"] = len(sub_frames)
67 sub_header = msgpack.dumps(
68 sub_header, default=msgpack_encode_default, use_bin_type=True
69 )
70 return [sub_header] + sub_frames
71
72 def create_pickled_sub_frames(obj: Pickled | ToPickle) -> list:
73 if isinstance(obj, Pickled):
74 sub_header, sub_frames = obj.header, obj.frames
75 else:
76 sub_frames = []
77 sub_header = {
78 "pickled-obj": pickle.dumps(
79 obj.data,
80 # In to support len() and slicing, we convert `PickleBuffer`
81 # objects to memoryviews of bytes.
82 buffer_callback=lambda x: sub_frames.append(
83 ensure_memoryview(x)
84 ),
85 )
86 }
87 _inplace_compress_frames(sub_header, sub_frames)
88
89 sub_header["num-sub-frames"] = len(sub_frames)
90 sub_header = msgpack.dumps(sub_header)
91 return [sub_header] + sub_frames
92
93 frames = [None]
94
95 def _encode_default(obj):
96 if isinstance(obj, (Serialize, Serialized)):
97 offset = len(frames)
98 frames.extend(create_serialized_sub_frames(obj))
99 return {"__Serialized__": offset}
100 elif isinstance(obj, (ToPickle, Pickled)):
101 offset = len(frames)
102 frames.extend(create_pickled_sub_frames(obj))
103 return {"__Pickled__": offset}
104 else:
105 return msgpack_encode_default(obj)
106
107 frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
108 return frames
109
110 except Exception:
111 logger.critical("Failed to Serialize", exc_info=True)
112 raise
113
114
115 def loads(frames, deserialize=True, deserializers=None):
116 """Transform bytestream back into Python value"""
117
118 try:
119
120 def _decode_default(obj):
121 offset = obj.get("__Serialized__", 0)
122 if offset > 0:
123 sub_header = msgpack.loads(
124 frames[offset],
125 object_hook=msgpack_decode_default,
126 use_list=False,
127 **msgpack_opts,
128 )
129 offset += 1
130 sub_frames = frames[offset : offset + sub_header["num-sub-frames"]]
131 if deserialize:
132 if "compression" in sub_header:
133 sub_frames = decompress(sub_header, sub_frames)
134 return merge_and_deserialize(
135 sub_header, sub_frames, deserializers=deserializers
136 )
137 else:
138 return Serialized(sub_header, sub_frames)
139
140 offset = obj.get("__Pickled__", 0)
141 if offset > 0:
142 sub_header = msgpack.loads(frames[offset])
143 offset += 1
144 sub_frames = frames[offset : offset + sub_header["num-sub-frames"]]
145 if "compression" in sub_header:
146 sub_frames = decompress(sub_header, sub_frames)
147 return pickle.loads(sub_header["pickled-obj"], buffers=sub_frames)
148 return msgpack_decode_default(obj)
149
150 return msgpack.loads(
151 frames[0], object_hook=_decode_default, use_list=False, **msgpack_opts
152 )
153
154 except Exception:
155 logger.critical("Failed to deserialize", exc_info=True)
156 raise
157
[end of distributed/protocol/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/distributed/protocol/core.py b/distributed/protocol/core.py
--- a/distributed/protocol/core.py
+++ b/distributed/protocol/core.py
@@ -11,6 +11,7 @@
Serialize,
Serialized,
ToPickle,
+ _is_msgpack_serializable,
merge_and_deserialize,
msgpack_decode_default,
msgpack_encode_default,
@@ -104,7 +105,31 @@
else:
return msgpack_encode_default(obj)
- frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
+ try:
+ frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
+ except TypeError as e:
+ logger.info(
+ f"Failed to serialize ({e}); falling back to pickle. "
+ "Be aware that this may degrade performance."
+ )
+
+ def _encode_default_safe(obj):
+ encoded = _encode_default(obj)
+ if encoded is not obj or _is_msgpack_serializable(obj):
+ return encoded
+
+ obj = ToPickle(obj)
+ offset = len(frames)
+ frames.extend(create_pickled_sub_frames(obj))
+ return {"__Pickled__": offset}
+
+ # If possible, we want to avoid the performance penalty from the checks
+ # implemented in _encode_default_safe to fall back to pickle, so we
+ # try to serialize the data without the fallback first assuming that
+ # this succeeds in the overwhelming majority of cases.
+ frames[0] = msgpack.dumps(
+ msg, default=_encode_default_safe, use_bin_type=True
+ )
return frames
except Exception:
|
{"golden_diff": "diff --git a/distributed/protocol/core.py b/distributed/protocol/core.py\n--- a/distributed/protocol/core.py\n+++ b/distributed/protocol/core.py\n@@ -11,6 +11,7 @@\n Serialize,\n Serialized,\n ToPickle,\n+ _is_msgpack_serializable,\n merge_and_deserialize,\n msgpack_decode_default,\n msgpack_encode_default,\n@@ -104,7 +105,31 @@\n else:\n return msgpack_encode_default(obj)\n \n- frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\n+ try:\n+ frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\n+ except TypeError as e:\n+ logger.info(\n+ f\"Failed to serialize ({e}); falling back to pickle. \"\n+ \"Be aware that this may degrade performance.\"\n+ )\n+\n+ def _encode_default_safe(obj):\n+ encoded = _encode_default(obj)\n+ if encoded is not obj or _is_msgpack_serializable(obj):\n+ return encoded\n+\n+ obj = ToPickle(obj)\n+ offset = len(frames)\n+ frames.extend(create_pickled_sub_frames(obj))\n+ return {\"__Pickled__\": offset}\n+\n+ # If possible, we want to avoid the performance penalty from the checks\n+ # implemented in _encode_default_safe to fall back to pickle, so we\n+ # try to serialize the data without the fallback first assuming that\n+ # this succeeds in the overwhelming majority of cases.\n+ frames[0] = msgpack.dumps(\n+ msg, default=_encode_default_safe, use_bin_type=True\n+ )\n return frames\n \n except Exception:\n", "issue": "`distributed.protocol.dumps` does not handle objects not serializable via `msgpack.dumps` as suggested by docstring\nWhen using `distributed.protocol.dumps` to serialize arbitrary data, it fails and raises a `TypeError`. The docstring suggests it should be able to handle this.\r\n\r\n**Minimal Complete Verifiable Example**:\r\n\r\n```python\r\nimport numpy as np\r\nfrom distributed.protocol import dumps\r\n\r\ndumps(np.int64(123))\r\n```\r\n\r\nraises\r\n\r\n```python\r\n2024-01-09 19:55:04,572 - distributed.protocol.core - CRITICAL - Failed to Serialize\r\nTraceback (most recent call last):\r\n File \"/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py\", line 108, in dumps\r\n frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py\", line 36, in packb\r\n return Packer(**kwargs).pack(o)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"msgpack/_packer.pyx\", line 294, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 300, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 297, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 291, in msgpack._cmsgpack.Packer._pack\r\nTypeError: can not serialize 'numpy.int64' object\r\nTraceback (most recent call last):\r\n File \"/Users/hendrikmakait/projects/dask/distributed/sandbox.py\", line 5, in <module>\r\n dumps(np.int64(123))\r\n File \"/Users/hendrikmakait/projects/dask/distributed/distributed/protocol/core.py\", line 108, in dumps\r\n frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Caskroom/mambaforge/base/envs/dask-distributed/lib/python3.12/site-packages/msgpack/__init__.py\", line 36, in packb\r\n return Packer(**kwargs).pack(o)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"msgpack/_packer.pyx\", line 294, in msgpack._cmsgpack.Packer.pack\r\n File 
\"msgpack/_packer.pyx\", line 300, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 297, in msgpack._cmsgpack.Packer.pack\r\n File \"msgpack/_packer.pyx\", line 291, in msgpack._cmsgpack.Packer._pack\r\nTypeError: can not serialize 'numpy.int64' object\r\n```\r\n\r\nRelevant section of `dumps`'s docstring: https://github.com/dask/distributed/blob/7562f9c566978de4f3f5b73920a24ea1813d6e28/distributed/protocol/core.py#L30-L35\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\n\nimport msgpack\n\nfrom distributed.protocol import pickle\nfrom distributed.protocol.compression import decompress, maybe_compress\nfrom distributed.protocol.serialize import (\n Pickled,\n Serialize,\n Serialized,\n ToPickle,\n merge_and_deserialize,\n msgpack_decode_default,\n msgpack_encode_default,\n serialize_and_split,\n)\nfrom distributed.protocol.utils import msgpack_opts\nfrom distributed.utils import ensure_memoryview\n\nlogger = logging.getLogger(__name__)\n\n\ndef dumps( # type: ignore[no-untyped-def]\n msg, serializers=None, on_error=\"message\", context=None, frame_split_size=None\n) -> list:\n \"\"\"Transform Python message to bytestream suitable for communication\n\n Developer Notes\n ---------------\n The approach here is to use `msgpack.dumps()` to serialize `msg` and\n write the result to the first output frame. If `msgpack.dumps()`\n encounters an object it cannot serialize like a NumPy array, it is handled\n out-of-band by `_encode_default()` and appended to the output frame list.\n \"\"\"\n try:\n if context and \"compression\" in context:\n compress_opts = {\"compression\": context[\"compression\"]}\n else:\n compress_opts = {}\n\n def _inplace_compress_frames(header, frames):\n compression = list(header.get(\"compression\", [None] * len(frames)))\n\n for i in range(len(frames)):\n if compression[i] is None:\n compression[i], frames[i] = maybe_compress(\n frames[i], **compress_opts\n )\n\n header[\"compression\"] = tuple(compression)\n\n def create_serialized_sub_frames(obj: Serialized | Serialize) -> list:\n if isinstance(obj, Serialized):\n sub_header, sub_frames = obj.header, obj.frames\n else:\n sub_header, sub_frames = serialize_and_split(\n obj,\n serializers=serializers,\n on_error=on_error,\n context=context,\n size=frame_split_size,\n )\n _inplace_compress_frames(sub_header, sub_frames)\n sub_header[\"num-sub-frames\"] = len(sub_frames)\n sub_header = msgpack.dumps(\n sub_header, default=msgpack_encode_default, use_bin_type=True\n )\n return [sub_header] + sub_frames\n\n def create_pickled_sub_frames(obj: Pickled | ToPickle) -> list:\n if isinstance(obj, Pickled):\n sub_header, sub_frames = obj.header, obj.frames\n else:\n sub_frames = []\n sub_header = {\n \"pickled-obj\": pickle.dumps(\n obj.data,\n # In to support len() and slicing, we convert `PickleBuffer`\n # objects to memoryviews of bytes.\n buffer_callback=lambda x: sub_frames.append(\n ensure_memoryview(x)\n ),\n )\n }\n _inplace_compress_frames(sub_header, sub_frames)\n\n sub_header[\"num-sub-frames\"] = len(sub_frames)\n sub_header = msgpack.dumps(sub_header)\n return [sub_header] + sub_frames\n\n frames = [None]\n\n def _encode_default(obj):\n if isinstance(obj, (Serialize, Serialized)):\n offset = len(frames)\n frames.extend(create_serialized_sub_frames(obj))\n return {\"__Serialized__\": offset}\n elif isinstance(obj, (ToPickle, Pickled)):\n offset = len(frames)\n frames.extend(create_pickled_sub_frames(obj))\n return {\"__Pickled__\": offset}\n else:\n return 
msgpack_encode_default(obj)\n\n frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)\n return frames\n\n except Exception:\n logger.critical(\"Failed to Serialize\", exc_info=True)\n raise\n\n\ndef loads(frames, deserialize=True, deserializers=None):\n \"\"\"Transform bytestream back into Python value\"\"\"\n\n try:\n\n def _decode_default(obj):\n offset = obj.get(\"__Serialized__\", 0)\n if offset > 0:\n sub_header = msgpack.loads(\n frames[offset],\n object_hook=msgpack_decode_default,\n use_list=False,\n **msgpack_opts,\n )\n offset += 1\n sub_frames = frames[offset : offset + sub_header[\"num-sub-frames\"]]\n if deserialize:\n if \"compression\" in sub_header:\n sub_frames = decompress(sub_header, sub_frames)\n return merge_and_deserialize(\n sub_header, sub_frames, deserializers=deserializers\n )\n else:\n return Serialized(sub_header, sub_frames)\n\n offset = obj.get(\"__Pickled__\", 0)\n if offset > 0:\n sub_header = msgpack.loads(frames[offset])\n offset += 1\n sub_frames = frames[offset : offset + sub_header[\"num-sub-frames\"]]\n if \"compression\" in sub_header:\n sub_frames = decompress(sub_header, sub_frames)\n return pickle.loads(sub_header[\"pickled-obj\"], buffers=sub_frames)\n return msgpack_decode_default(obj)\n\n return msgpack.loads(\n frames[0], object_hook=_decode_default, use_list=False, **msgpack_opts\n )\n\n except Exception:\n logger.critical(\"Failed to deserialize\", exc_info=True)\n raise\n", "path": "distributed/protocol/core.py"}]}
| 2,799 | 382 |
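The fix in this row keeps the fast path — a plain `msgpack.dumps` call — and only falls back to a pickle-based encoder when a `TypeError` is raised, so well-behaved messages pay no extra cost. The sketch below illustrates that try/except fallback outside of distributed; the `__pickled__` wrapper key and the loads-side hook are illustrative, not the names used by the library.

```python
import pickle
import msgpack

def dumps_with_fallback(msg) -> bytes:
    """Serialize msg with msgpack, pickling anything msgpack cannot encode."""
    def _pickle_unknown(obj):
        # msgpack calls this hook only for objects it cannot encode itself,
        # e.g. numpy scalars such as np.int64 from the issue report.
        return {"__pickled__": pickle.dumps(obj)}

    try:
        # Fast path: no hook, no per-object overhead.
        return msgpack.dumps(msg, use_bin_type=True)
    except TypeError:
        # Slow path, mirroring the spirit of the golden diff's fallback.
        return msgpack.dumps(msg, default=_pickle_unknown, use_bin_type=True)
```

A matching `object_hook` on the loads side would detect the `__pickled__` key and call `pickle.loads` to restore the original object.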
gh_patches_debug_5353
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-1949
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"$" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute
**Describe the bug**
"$" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute

**To Reproduce**
Steps to reproduce the behavior:
1. Add a Datasource in the CLI
2. Choose a database, answer one of the prompts (e.g., password) with a string that contains the "$" character
3. See the error
**Expected behavior**
The "$" character should not be treated specially in user's answer to prompts
**Environment (please complete the following information):**
- OS: N/A
- GE Version: 0.12.1
**Additional context**
N/A
</issue>
<code>
[start of great_expectations/data_context/util.py]
1 import copy
2 import inspect
3 import logging
4 import os
5 import re
6 from collections import OrderedDict
7
8 from great_expectations.data_context.types.base import (
9 DataContextConfig,
10 DataContextConfigSchema,
11 )
12 from great_expectations.exceptions import MissingConfigVariableError
13 from great_expectations.util import load_class, verify_dynamic_loading_support
14
15 logger = logging.getLogger(__name__)
16
17
18 # TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default
19 # TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.
20 def instantiate_class_from_config(config, runtime_environment, config_defaults=None):
21 """Build a GE class from configuration dictionaries."""
22
23 if config_defaults is None:
24 config_defaults = {}
25
26 config = copy.deepcopy(config)
27
28 module_name = config.pop("module_name", None)
29 if module_name is None:
30 try:
31 module_name = config_defaults.pop("module_name")
32 except KeyError:
33 raise KeyError(
34 "Neither config : {} nor config_defaults : {} contains a module_name key.".format(
35 config, config_defaults,
36 )
37 )
38 else:
39 # Pop the value without using it, to avoid sending an unwanted value to the config_class
40 config_defaults.pop("module_name", None)
41
42 verify_dynamic_loading_support(module_name=module_name)
43
44 class_name = config.pop("class_name", None)
45 if class_name is None:
46 logger.warning(
47 "Instantiating class from config without an explicit class_name is dangerous. Consider adding "
48 "an explicit class_name for %s" % config.get("name")
49 )
50 try:
51 class_name = config_defaults.pop("class_name")
52 except KeyError:
53 raise KeyError(
54 "Neither config : {} nor config_defaults : {} contains a class_name key.".format(
55 config, config_defaults,
56 )
57 )
58 else:
59 # Pop the value without using it, to avoid sending an unwanted value to the config_class
60 config_defaults.pop("class_name", None)
61
62 class_ = load_class(class_name=class_name, module_name=module_name)
63
64 config_with_defaults = copy.deepcopy(config_defaults)
65 config_with_defaults.update(config)
66 if runtime_environment is not None:
67 # If there are additional kwargs available in the runtime_environment requested by a
68 # class to be instantiated, provide them
69 argspec = inspect.getfullargspec(class_.__init__)[0][1:]
70
71 missing_args = set(argspec) - set(config_with_defaults.keys())
72 config_with_defaults.update(
73 {
74 missing_arg: runtime_environment[missing_arg]
75 for missing_arg in missing_args
76 if missing_arg in runtime_environment
77 }
78 )
79 # Add the entire runtime_environment as well if it's requested
80 if "runtime_environment" in missing_args:
81 config_with_defaults.update({"runtime_environment": runtime_environment})
82
83 try:
84 class_instance = class_(**config_with_defaults)
85 except TypeError as e:
86 raise TypeError(
87 "Couldn't instantiate class : {} with config : \n\t{}\n \n".format(
88 class_name, format_dict_for_error_message(config_with_defaults)
89 )
90 + str(e)
91 )
92
93 return class_instance
94
95
96 def format_dict_for_error_message(dict_):
97 # TODO : Tidy this up a bit. Indentation isn't fully consistent.
98
99 return "\n\t".join("\t\t".join((str(key), str(dict_[key]))) for key in dict_)
100
101
102 def substitute_config_variable(template_str, config_variables_dict):
103 """
104 This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,
105 returns a string where the pattern is replaced with the value of SOME_VARIABLE,
106 otherwise returns the string unchanged.
107
108 If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.
109 If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).
110 If it is not found there, the input string is returned as is.
111
112 :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}
113 or $SOME_VARIABLE
114 :param config_variables_dict: a dictionary of config variables. It is loaded from the
115 config variables store (by default, "uncommitted/config_variables.yml file)
116 :return:
117 """
118 if template_str is None:
119 return template_str
120
121 try:
122 match = re.search(r"\$\{(.*?)\}", template_str) or re.search(
123 r"\$([_a-z][_a-z0-9]*)", template_str
124 )
125 except TypeError:
126 # If the value is not a string (e.g., a boolean), we should return it as is
127 return template_str
128
129 if match:
130 config_variable_value = config_variables_dict.get(match.group(1))
131 if config_variable_value is not None:
132 if match.start() == 0 and match.end() == len(template_str):
133 return config_variable_value
134 else:
135 return (
136 template_str[: match.start()]
137 + config_variable_value
138 + template_str[match.end() :]
139 )
140
141 raise MissingConfigVariableError(
142 f"""\n\nUnable to find a match for config substitution variable: `{match.group(1)}`.
143 Please add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.
144 See https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets""",
145 missing_config_variable=match.group(1),
146 )
147
148 return template_str
149
150
151 def substitute_all_config_variables(data, replace_variables_dict):
152 """
153 Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like
154 config object for their values.
155
156 The method traverses the dictionary recursively.
157
158 :param data:
159 :param replace_variables_dict:
160 :return: a dictionary with all the variables replaced with their values
161 """
162 if isinstance(data, DataContextConfig):
163 data = DataContextConfigSchema().dump(data)
164
165 if isinstance(data, dict) or isinstance(data, OrderedDict):
166 return {
167 k: substitute_all_config_variables(v, replace_variables_dict)
168 for k, v in data.items()
169 }
170 elif isinstance(data, list):
171 return [
172 substitute_all_config_variables(v, replace_variables_dict) for v in data
173 ]
174 return substitute_config_variable(data, replace_variables_dict)
175
176
177 def file_relative_path(dunderfile, relative_path):
178 """
179 This function is useful when one needs to load a file that is
180 relative to the position of the current file. (Such as when
181 you encode a configuration file path in source file and want
182 in runnable in any current working directory)
183
184 It is meant to be used like the following:
185 file_relative_path(__file__, 'path/relative/to/file')
186
187 H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34
188 """
189 return os.path.join(os.path.dirname(dunderfile), relative_path)
190
[end of great_expectations/data_context/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/great_expectations/data_context/util.py b/great_expectations/data_context/util.py
--- a/great_expectations/data_context/util.py
+++ b/great_expectations/data_context/util.py
@@ -165,6 +165,8 @@
if isinstance(data, dict) or isinstance(data, OrderedDict):
return {
k: substitute_all_config_variables(v, replace_variables_dict)
+ if k.lower() not in {"password"}
+ else v
for k, v in data.items()
}
elif isinstance(data, list):
|
{"golden_diff": "diff --git a/great_expectations/data_context/util.py b/great_expectations/data_context/util.py\n--- a/great_expectations/data_context/util.py\n+++ b/great_expectations/data_context/util.py\n@@ -165,6 +165,8 @@\n if isinstance(data, dict) or isinstance(data, OrderedDict):\n return {\n k: substitute_all_config_variables(v, replace_variables_dict)\n+ if k.lower() not in {\"password\"}\n+ else v\n for k, v in data.items()\n }\n elif isinstance(data, list):\n", "issue": "\"$\" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute\n**Describe the bug**\r\n\"$\" character in user's response to a CLI prompt is mistakenly interpreted as a variable to substitute\r\n\r\n\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Add a Datasource in the CLI\r\n2. Choose a database, answer one of the prompts (e.g., password) with a string that contains the \"$\" character\r\n3. See the error\r\n\r\n**Expected behavior**\r\nThe \"$\" character should not be treated specially in user's answer to prompts\r\n\r\n**Environment (please complete the following information):**\r\n - OS: N/A\r\n - GE Version: 0.12.1\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "import copy\nimport inspect\nimport logging\nimport os\nimport re\nfrom collections import OrderedDict\n\nfrom great_expectations.data_context.types.base import (\n DataContextConfig,\n DataContextConfigSchema,\n)\nfrom great_expectations.exceptions import MissingConfigVariableError\nfrom great_expectations.util import load_class, verify_dynamic_loading_support\n\nlogger = logging.getLogger(__name__)\n\n\n# TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default\n# TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.\ndef instantiate_class_from_config(config, runtime_environment, config_defaults=None):\n \"\"\"Build a GE class from configuration dictionaries.\"\"\"\n\n if config_defaults is None:\n config_defaults = {}\n\n config = copy.deepcopy(config)\n\n module_name = config.pop(\"module_name\", None)\n if module_name is None:\n try:\n module_name = config_defaults.pop(\"module_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a module_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"module_name\", None)\n\n verify_dynamic_loading_support(module_name=module_name)\n\n class_name = config.pop(\"class_name\", None)\n if class_name is None:\n logger.warning(\n \"Instantiating class from config without an explicit class_name is dangerous. 
Consider adding \"\n \"an explicit class_name for %s\" % config.get(\"name\")\n )\n try:\n class_name = config_defaults.pop(\"class_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a class_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"class_name\", None)\n\n class_ = load_class(class_name=class_name, module_name=module_name)\n\n config_with_defaults = copy.deepcopy(config_defaults)\n config_with_defaults.update(config)\n if runtime_environment is not None:\n # If there are additional kwargs available in the runtime_environment requested by a\n # class to be instantiated, provide them\n argspec = inspect.getfullargspec(class_.__init__)[0][1:]\n\n missing_args = set(argspec) - set(config_with_defaults.keys())\n config_with_defaults.update(\n {\n missing_arg: runtime_environment[missing_arg]\n for missing_arg in missing_args\n if missing_arg in runtime_environment\n }\n )\n # Add the entire runtime_environment as well if it's requested\n if \"runtime_environment\" in missing_args:\n config_with_defaults.update({\"runtime_environment\": runtime_environment})\n\n try:\n class_instance = class_(**config_with_defaults)\n except TypeError as e:\n raise TypeError(\n \"Couldn't instantiate class : {} with config : \\n\\t{}\\n \\n\".format(\n class_name, format_dict_for_error_message(config_with_defaults)\n )\n + str(e)\n )\n\n return class_instance\n\n\ndef format_dict_for_error_message(dict_):\n # TODO : Tidy this up a bit. Indentation isn't fully consistent.\n\n return \"\\n\\t\".join(\"\\t\\t\".join((str(key), str(dict_[key]))) for key in dict_)\n\n\ndef substitute_config_variable(template_str, config_variables_dict):\n \"\"\"\n This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,\n returns a string where the pattern is replaced with the value of SOME_VARIABLE,\n otherwise returns the string unchanged.\n\n If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.\n If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).\n If it is not found there, the input string is returned as is.\n\n :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}\n or $SOME_VARIABLE\n :param config_variables_dict: a dictionary of config variables. 
It is loaded from the\n config variables store (by default, \"uncommitted/config_variables.yml file)\n :return:\n \"\"\"\n if template_str is None:\n return template_str\n\n try:\n match = re.search(r\"\\$\\{(.*?)\\}\", template_str) or re.search(\n r\"\\$([_a-z][_a-z0-9]*)\", template_str\n )\n except TypeError:\n # If the value is not a string (e.g., a boolean), we should return it as is\n return template_str\n\n if match:\n config_variable_value = config_variables_dict.get(match.group(1))\n if config_variable_value is not None:\n if match.start() == 0 and match.end() == len(template_str):\n return config_variable_value\n else:\n return (\n template_str[: match.start()]\n + config_variable_value\n + template_str[match.end() :]\n )\n\n raise MissingConfigVariableError(\n f\"\"\"\\n\\nUnable to find a match for config substitution variable: `{match.group(1)}`.\nPlease add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.\nSee https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\",\n missing_config_variable=match.group(1),\n )\n\n return template_str\n\n\ndef substitute_all_config_variables(data, replace_variables_dict):\n \"\"\"\n Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like\n config object for their values.\n\n The method traverses the dictionary recursively.\n\n :param data:\n :param replace_variables_dict:\n :return: a dictionary with all the variables replaced with their values\n \"\"\"\n if isinstance(data, DataContextConfig):\n data = DataContextConfigSchema().dump(data)\n\n if isinstance(data, dict) or isinstance(data, OrderedDict):\n return {\n k: substitute_all_config_variables(v, replace_variables_dict)\n for k, v in data.items()\n }\n elif isinstance(data, list):\n return [\n substitute_all_config_variables(v, replace_variables_dict) for v in data\n ]\n return substitute_config_variable(data, replace_variables_dict)\n\n\ndef file_relative_path(dunderfile, relative_path):\n \"\"\"\n This function is useful when one needs to load a file that is\n relative to the position of the current file. (Such as when\n you encode a configuration file path in source file and want\n in runnable in any current working directory)\n\n It is meant to be used like the following:\n file_relative_path(__file__, 'path/relative/to/file')\n\n H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34\n \"\"\"\n return os.path.join(os.path.dirname(dunderfile), relative_path)\n", "path": "great_expectations/data_context/util.py"}]}
| 2,771 | 121 |
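The one-line fix in this row exempts values stored under a `password` key from `$`-variable substitution, so a literal `$` in a database password no longer triggers a missing-variable error. A stripped-down version of that recursive walk is sketched below; the substitution helper is reduced to a best-effort regex replacement (the real code raises `MissingConfigVariableError` on unknown variables) and the set of exempt keys is taken from the diff.

```python
import re

SKIP_SUBSTITUTION_KEYS = {"password"}  # keys whose values are used verbatim

def substitute_variable(value, variables):
    if not isinstance(value, str):
        return value
    match = re.search(r"\$\{(.*?)\}", value) or re.search(r"\$([_a-z][_a-z0-9]*)", value)
    if match and match.group(1) in variables:
        return value[: match.start()] + variables[match.group(1)] + value[match.end():]
    return value

def substitute_all(data, variables):
    if isinstance(data, dict):
        return {
            k: v if k.lower() in SKIP_SUBSTITUTION_KEYS else substitute_all(v, variables)
            for k, v in data.items()
        }
    if isinstance(data, list):
        return [substitute_all(v, variables) for v in data]
    return substitute_variable(data, variables)
```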
gh_patches_debug_25429
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-6524
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix test_sdists test farm test
`tests/letstest/scripts/test_sdists.sh`, which we currently run as part of the release process, is now failing. I think this should be fixed quickly so it doesn't cause us problems whenever we go to do our next release. The cause of the failure is that newer versions of `pytest` cannot be installed using old versions of `setuptools`.
To resolve this, we either need to use a newer version of `setuptools` or an older version of `pytest`. My thought is to upgrade `setuptools` because it should prevent problems with other packages in the future and our tests should arguably work with the latest version of every package installed, however, either approach should work.
If we upgrade `setuptools`, we need to decide where to do it. My thought would be to do it somewhere in `tools/_venv_common.py` or the scripts it calls so other code that uses `tools/_venv_common.py` would benefit. (It also looks like `tests/letstest/scripts/test_sdists.sh` still uses the old name `tools/_venv_common.sh` and needs to be updated.) `setuptools>=30.3` seems to be [required](https://github.com/pytest-dev/pytest/commit/d4351ac5a287eb96521f09618bbbe1c73f4e6884#diff-2eeaed663bd0d25b7e608891384b7298).
If we decided to pin back `pytest` (and maybe other packages) instead, I don't have an opinion on the approach.
@ohemorange or @sydneyli, are you interested in taking this? I initially planned on doing it myself but haven't managed to get to it yet and think it's important. It shouldn't be much work.
</issue>
<code>
[start of tools/_venv_common.py]
1 #!/usr/bin/env python
2 """Aids in creating a developer virtual environment for Certbot.
3
4 When this module is run as a script, it takes the arguments that should
5 be passed to pip to install the Certbot packages as command line
6 arguments. The virtual environment will be created with the name "venv"
7 in the current working directory and will use the default version of
8 Python for the virtualenv executable in your PATH. You can change the
9 name of the virtual environment by setting the environment variable
10 VENV_NAME.
11 """
12
13 from __future__ import print_function
14
15 import os
16 import shutil
17 import glob
18 import time
19 import subprocess
20 import sys
21 import re
22 import shlex
23
24 VERSION_PATTERN = re.compile(r'^(\d+)\.(\d+).*$')
25
26
27 class PythonExecutableNotFoundError(Exception):
28 pass
29
30
31 def find_python_executable(python_major):
32 # type: (int) -> str
33 """
34 Find the relevant python executable that is of the given python major version.
35 Will test, in decreasing priority order:
36 * the current Python interpreter
37 * 'pythonX' executable in PATH (with X the given major version) if available
38 * 'python' executable in PATH if available
39 * Windows Python launcher 'py' executable in PATH if available
40 Incompatible python versions for Certbot will be evicted (eg. Python < 3.5 on Windows)
41 :param int python_major: the Python major version to target (2 or 3)
42 :rtype: str
43 :return: the relevant python executable path
44 :raise RuntimeError: if no relevant python executable path could be found
45 """
46 python_executable_path = None
47
48 # First try, current python executable
49 if _check_version('{0}.{1}.{2}'.format(
50 sys.version_info[0], sys.version_info[1], sys.version_info[2]), python_major):
51 return sys.executable
52
53 # Second try, with python executables in path
54 versions_to_test = ['2.7', '2', ''] if python_major == 2 else ['3', '']
55 for one_version in versions_to_test:
56 try:
57 one_python = 'python{0}'.format(one_version)
58 output = subprocess.check_output([one_python, '--version'],
59 universal_newlines=True, stderr=subprocess.STDOUT)
60 if _check_version(output.strip().split()[1], python_major):
61 return subprocess.check_output([one_python, '-c',
62 'import sys; sys.stdout.write(sys.executable);'],
63 universal_newlines=True)
64 except (subprocess.CalledProcessError, OSError):
65 pass
66
67 # Last try, with Windows Python launcher
68 try:
69 env_arg = '-{0}'.format(python_major)
70 output_version = subprocess.check_output(['py', env_arg, '--version'],
71 universal_newlines=True, stderr=subprocess.STDOUT)
72 if _check_version(output_version.strip().split()[1], python_major):
73 return subprocess.check_output(['py', env_arg, '-c',
74 'import sys; sys.stdout.write(sys.executable);'],
75 universal_newlines=True)
76 except (subprocess.CalledProcessError, OSError):
77 pass
78
79 if not python_executable_path:
80 raise RuntimeError('Error, no compatible Python {0} executable for Certbot could be found.'
81 .format(python_major))
82
83
84 def _check_version(version_str, major_version):
85 search = VERSION_PATTERN.search(version_str)
86
87 if not search:
88 return False
89
90 version = (int(search.group(1)), int(search.group(2)))
91
92 minimal_version_supported = (2, 7)
93 if major_version == 3 and os.name == 'nt':
94 minimal_version_supported = (3, 5)
95 elif major_version == 3:
96 minimal_version_supported = (3, 4)
97
98 if version >= minimal_version_supported:
99 return True
100
101 print('Incompatible python version for Certbot found: {0}'.format(version_str))
102 return False
103
104
105 def subprocess_with_print(cmd, env=os.environ, shell=False):
106 print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)
107 subprocess.check_call(cmd, env=env, shell=shell)
108
109
110 def get_venv_bin_path(venv_path):
111 python_linux = os.path.join(venv_path, 'bin/python')
112 if os.path.isfile(python_linux):
113 return os.path.abspath(os.path.dirname(python_linux))
114 python_windows = os.path.join(venv_path, 'Scripts\\python.exe')
115 if os.path.isfile(python_windows):
116 return os.path.abspath(os.path.dirname(python_windows))
117
118 raise ValueError((
119 'Error, could not find python executable in venv path {0}: is it a valid venv ?'
120 .format(venv_path)))
121
122
123 def main(venv_name, venv_args, args):
124 """Creates a virtual environment and installs packages.
125
126 :param str venv_name: The name or path at where the virtual
127 environment should be created.
128 :param str venv_args: Command line arguments for virtualenv
129 :param str args: Command line arguments that should be given to pip
130 to install packages
131 """
132
133 for path in glob.glob('*.egg-info'):
134 if os.path.isdir(path):
135 shutil.rmtree(path)
136 else:
137 os.remove(path)
138
139 env_venv_name = os.environ.get('VENV_NAME')
140 if env_venv_name:
141 print('Creating venv at {0}'
142 ' as specified in VENV_NAME'.format(env_venv_name))
143 venv_name = env_venv_name
144
145 if os.path.isdir(venv_name):
146 os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))
147
148 command = [sys.executable, '-m', 'virtualenv', '--no-site-packages', '--setuptools', venv_name]
149 command.extend(shlex.split(venv_args))
150 subprocess_with_print(command)
151
152 # We execute the two following commands in the context of the virtual environment, to install
153 # the packages in it. To do so, we append the venv binary to the PATH that will be used for
154 # these commands. With this trick, correct python executable will be selected.
155 new_environ = os.environ.copy()
156 new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])
157 subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),
158 env=new_environ, shell=True)
159 subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),
160 env=new_environ, shell=True)
161
162 if os.path.isdir(os.path.join(venv_name, 'bin')):
163 # Linux/OSX specific
164 print('-------------------------------------------------------------------')
165 print('Please run the following command to activate developer environment:')
166 print('source {0}/bin/activate'.format(venv_name))
167 print('-------------------------------------------------------------------')
168 elif os.path.isdir(os.path.join(venv_name, 'Scripts')):
169 # Windows specific
170 print('---------------------------------------------------------------------------')
171 print('Please run one of the following commands to activate developer environment:')
172 print('{0}\\Scripts\\activate.bat (for Batch)'.format(venv_name))
173 print('.\\{0}\\Scripts\\Activate.ps1 (for Powershell)'.format(venv_name))
174 print('---------------------------------------------------------------------------')
175 else:
176 raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))
177
178
179 if __name__ == '__main__':
180 main('venv',
181 '',
182 sys.argv[1:])
183
[end of tools/_venv_common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/_venv_common.py b/tools/_venv_common.py
--- a/tools/_venv_common.py
+++ b/tools/_venv_common.py
@@ -149,13 +149,15 @@
command.extend(shlex.split(venv_args))
subprocess_with_print(command)
- # We execute the two following commands in the context of the virtual environment, to install
+ # We execute the following commands in the context of the virtual environment, to install
# the packages in it. To do so, we append the venv binary to the PATH that will be used for
# these commands. With this trick, correct python executable will be selected.
new_environ = os.environ.copy()
new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])
subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),
env=new_environ, shell=True)
+ subprocess_with_print("python -m pip install --upgrade 'setuptools>=30.3'",
+ env=new_environ, shell=True)
subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),
env=new_environ, shell=True)
|
{"golden_diff": "diff --git a/tools/_venv_common.py b/tools/_venv_common.py\n--- a/tools/_venv_common.py\n+++ b/tools/_venv_common.py\n@@ -149,13 +149,15 @@\n command.extend(shlex.split(venv_args))\n subprocess_with_print(command)\n \n- # We execute the two following commands in the context of the virtual environment, to install\n+ # We execute the following commands in the context of the virtual environment, to install\n # the packages in it. To do so, we append the venv binary to the PATH that will be used for\n # these commands. With this trick, correct python executable will be selected.\n new_environ = os.environ.copy()\n new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])\n subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),\n env=new_environ, shell=True)\n+ subprocess_with_print(\"python -m pip install --upgrade 'setuptools>=30.3'\",\n+ env=new_environ, shell=True)\n subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),\n env=new_environ, shell=True)\n", "issue": "Fix test_sdists test farm test\n`tests/letstest/scripts/test_sdists.sh` which we currently run as part of the release process is now failing. I think this should be fixed quickly so it doesn't cause us problems whenever we go to do our next release. The cause of the failure is that newer versions of `pytest` cannot be installed using old versions of `setuptools`.\r\n\r\nTo resolve this, we either need to use a newer version of `setuptools` or an older version of `pytest`. My thought is to upgrade `setuptools` because it should prevent problems with other packages in the future and our tests should arguably work with the latest version of every package installed, however, either approach should work.\r\n\r\nIf we upgrade `setuptools`, we need to decide where to do it. My thought would be to do it somewhere in `tools/_venv_common.py` or the scripts it calls so other code that uses `tools/_venv_common.py` would benefit. (It also looks like `tests/letstest/scripts/test_sdists.sh` still uses the old name `tools/_venv_common.sh` and needs to be updated.) `setuptools>=30.3` seems to be [required](https://github.com/pytest-dev/pytest/commit/d4351ac5a287eb96521f09618bbbe1c73f4e6884#diff-2eeaed663bd0d25b7e608891384b7298).\r\n\r\nIf we decided to pin back `pytest` (and maybe other packages) instead, I don't have an opinion on the approach.\r\n\r\n@ohemorange or @sydneyli, are you interested in taking this? I initially planned on doing it myself but haven't managed to get to it yet and think it's important. It shouldn't be much work.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Aids in creating a developer virtual environment for Certbot.\n\nWhen this module is run as a script, it takes the arguments that should\nbe passed to pip to install the Certbot packages as command line\narguments. The virtual environment will be created with the name \"venv\"\nin the current working directory and will use the default version of\nPython for the virtualenv executable in your PATH. 
You can change the\nname of the virtual environment by setting the environment variable\nVENV_NAME.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport glob\nimport time\nimport subprocess\nimport sys\nimport re\nimport shlex\n\nVERSION_PATTERN = re.compile(r'^(\\d+)\\.(\\d+).*$')\n\n\nclass PythonExecutableNotFoundError(Exception):\n pass\n\n\ndef find_python_executable(python_major):\n # type: (int) -> str\n \"\"\"\n Find the relevant python executable that is of the given python major version.\n Will test, in decreasing priority order:\n * the current Python interpreter\n * 'pythonX' executable in PATH (with X the given major version) if available\n * 'python' executable in PATH if available\n * Windows Python launcher 'py' executable in PATH if available\n Incompatible python versions for Certbot will be evicted (eg. Python < 3.5 on Windows)\n :param int python_major: the Python major version to target (2 or 3)\n :rtype: str\n :return: the relevant python executable path\n :raise RuntimeError: if no relevant python executable path could be found\n \"\"\"\n python_executable_path = None\n\n # First try, current python executable\n if _check_version('{0}.{1}.{2}'.format(\n sys.version_info[0], sys.version_info[1], sys.version_info[2]), python_major):\n return sys.executable\n\n # Second try, with python executables in path\n versions_to_test = ['2.7', '2', ''] if python_major == 2 else ['3', '']\n for one_version in versions_to_test:\n try:\n one_python = 'python{0}'.format(one_version)\n output = subprocess.check_output([one_python, '--version'],\n universal_newlines=True, stderr=subprocess.STDOUT)\n if _check_version(output.strip().split()[1], python_major):\n return subprocess.check_output([one_python, '-c',\n 'import sys; sys.stdout.write(sys.executable);'],\n universal_newlines=True)\n except (subprocess.CalledProcessError, OSError):\n pass\n\n # Last try, with Windows Python launcher\n try:\n env_arg = '-{0}'.format(python_major)\n output_version = subprocess.check_output(['py', env_arg, '--version'],\n universal_newlines=True, stderr=subprocess.STDOUT)\n if _check_version(output_version.strip().split()[1], python_major):\n return subprocess.check_output(['py', env_arg, '-c',\n 'import sys; sys.stdout.write(sys.executable);'],\n universal_newlines=True)\n except (subprocess.CalledProcessError, OSError):\n pass\n\n if not python_executable_path:\n raise RuntimeError('Error, no compatible Python {0} executable for Certbot could be found.'\n .format(python_major))\n\n\ndef _check_version(version_str, major_version):\n search = VERSION_PATTERN.search(version_str)\n\n if not search:\n return False\n\n version = (int(search.group(1)), int(search.group(2)))\n\n minimal_version_supported = (2, 7)\n if major_version == 3 and os.name == 'nt':\n minimal_version_supported = (3, 5)\n elif major_version == 3:\n minimal_version_supported = (3, 4)\n\n if version >= minimal_version_supported:\n return True\n\n print('Incompatible python version for Certbot found: {0}'.format(version_str))\n return False\n\n\ndef subprocess_with_print(cmd, env=os.environ, shell=False):\n print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)\n subprocess.check_call(cmd, env=env, shell=shell)\n\n\ndef get_venv_bin_path(venv_path):\n python_linux = os.path.join(venv_path, 'bin/python')\n if os.path.isfile(python_linux):\n return os.path.abspath(os.path.dirname(python_linux))\n python_windows = os.path.join(venv_path, 'Scripts\\\\python.exe')\n if 
os.path.isfile(python_windows):\n return os.path.abspath(os.path.dirname(python_windows))\n\n raise ValueError((\n 'Error, could not find python executable in venv path {0}: is it a valid venv ?'\n .format(venv_path)))\n\n\ndef main(venv_name, venv_args, args):\n \"\"\"Creates a virtual environment and installs packages.\n\n :param str venv_name: The name or path at where the virtual\n environment should be created.\n :param str venv_args: Command line arguments for virtualenv\n :param str args: Command line arguments that should be given to pip\n to install packages\n \"\"\"\n\n for path in glob.glob('*.egg-info'):\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n env_venv_name = os.environ.get('VENV_NAME')\n if env_venv_name:\n print('Creating venv at {0}'\n ' as specified in VENV_NAME'.format(env_venv_name))\n venv_name = env_venv_name\n\n if os.path.isdir(venv_name):\n os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))\n\n command = [sys.executable, '-m', 'virtualenv', '--no-site-packages', '--setuptools', venv_name]\n command.extend(shlex.split(venv_args))\n subprocess_with_print(command)\n\n # We execute the two following commands in the context of the virtual environment, to install\n # the packages in it. To do so, we append the venv binary to the PATH that will be used for\n # these commands. With this trick, correct python executable will be selected.\n new_environ = os.environ.copy()\n new_environ['PATH'] = os.pathsep.join([get_venv_bin_path(venv_name), new_environ['PATH']])\n subprocess_with_print('python {0}'.format('./letsencrypt-auto-source/pieces/pipstrap.py'),\n env=new_environ, shell=True)\n subprocess_with_print('python {0} {1}'.format('./tools/pip_install.py', ' '.join(args)),\n env=new_environ, shell=True)\n\n if os.path.isdir(os.path.join(venv_name, 'bin')):\n # Linux/OSX specific\n print('-------------------------------------------------------------------')\n print('Please run the following command to activate developer environment:')\n print('source {0}/bin/activate'.format(venv_name))\n print('-------------------------------------------------------------------')\n elif os.path.isdir(os.path.join(venv_name, 'Scripts')):\n # Windows specific\n print('---------------------------------------------------------------------------')\n print('Please run one of the following commands to activate developer environment:')\n print('{0}\\\\Scripts\\\\activate.bat (for Batch)'.format(venv_name))\n print('.\\\\{0}\\\\Scripts\\\\Activate.ps1 (for Powershell)'.format(venv_name))\n print('---------------------------------------------------------------------------')\n else:\n raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))\n\n\nif __name__ == '__main__':\n main('venv',\n '',\n sys.argv[1:])\n", "path": "tools/_venv_common.py"}]}
| 3,058 | 287 |
gh_patches_debug_33406
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-2315
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Start shipping our own type stubs
Typeshed houses very rudimentary type stubs for urllib3 but they're hidden within `requests.packages.urllib3` so Mypy can't take advantage of them when using urllib3 on its own. We could also probably improve on what is already in typeshed by adding better types than `Any` everywhere.
</issue>
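For orientation only: the usual alternative to typeshed stubs is the PEP 561 route — ship the annotations inside the package and mark it with an empty `py.typed` file declared as package data. The fragment below is an assumed minimal `setup.py` sketch (not the project's actual packaging), using the `src/urllib3` layout visible in the file list below.

```python
# Hypothetical PEP 561 packaging sketch -- the metadata values are assumptions.
from setuptools import find_packages, setup

setup(
    name="urllib3",
    package_dir={"": "src"},
    packages=find_packages("src"),
    # An empty src/urllib3/py.typed marker tells type checkers such as mypy
    # to read the annotations shipped inside the installed package itself.
    package_data={"urllib3": ["py.typed"]},
    zip_safe=False,  # markers inside zipped installs are not reliably found
)
```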
<code>
[start of noxfile.py]
1 import os
2 import shutil
3 import subprocess
4
5 import nox
6
7 # Whenever type-hints are completed on a file it should be added here so that
8 # this file will continue to be checked by mypy. Errors from other files are
9 # ignored.
10 TYPED_FILES = {
11 "src/urllib3/contrib/__init__.py",
12 "src/urllib3/contrib/_securetransport/bindings.py",
13 "src/urllib3/contrib/_securetransport/low_level.py",
14 "src/urllib3/contrib/ntlmpool.py",
15 "src/urllib3/contrib/pyopenssl.py",
16 "src/urllib3/contrib/securetransport.py",
17 "src/urllib3/contrib/socks.py",
18 "src/urllib3/__init__.py",
19 "src/urllib3/connection.py",
20 "src/urllib3/connectionpool.py",
21 "src/urllib3/exceptions.py",
22 "src/urllib3/_collections.py",
23 "src/urllib3/fields.py",
24 "src/urllib3/filepost.py",
25 "src/urllib3/poolmanager.py",
26 "src/urllib3/request.py",
27 "src/urllib3/response.py",
28 "src/urllib3/util/connection.py",
29 "src/urllib3/util/proxy.py",
30 "src/urllib3/util/queue.py",
31 "src/urllib3/util/response.py",
32 "src/urllib3/util/ssl_.py",
33 "src/urllib3/util/ssl_match_hostname.py",
34 "src/urllib3/util/ssltransport.py",
35 "src/urllib3/util/url.py",
36 "src/urllib3/util/request.py",
37 "src/urllib3/util/retry.py",
38 "src/urllib3/util/timeout.py",
39 "src/urllib3/util/util.py",
40 "src/urllib3/util/wait.py",
41 }
42 SOURCE_FILES = [
43 "docs/",
44 "dummyserver/",
45 "src/",
46 "test/",
47 "noxfile.py",
48 "setup.py",
49 ]
50
51
52 def tests_impl(session, extras="socks,secure,brotli"):
53 # Install deps and the package itself.
54 session.install("-r", "dev-requirements.txt")
55 session.install(f".[{extras}]")
56
57 # Show the pip version.
58 session.run("pip", "--version")
59 # Print the Python version and bytesize.
60 session.run("python", "--version")
61 session.run("python", "-c", "import struct; print(struct.calcsize('P') * 8)")
62 # Print OpenSSL information.
63 session.run("python", "-m", "OpenSSL.debug")
64
65 # Inspired from https://github.com/pyca/cryptography
66 # We use parallel mode and then combine here so that coverage.py will take
67 # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py
68 # and collapse them into src/urllib3/__init__.py.
69
70 session.run(
71 "coverage",
72 "run",
73 "--parallel-mode",
74 "-m",
75 "pytest",
76 "-r",
77 "a",
78 "--tb=native",
79 "--no-success-flaky-report",
80 *(session.posargs or ("test/",)),
81 env={"PYTHONWARNINGS": "always::DeprecationWarning"},
82 )
83 session.run("coverage", "combine")
84 session.run("coverage", "report", "-m")
85 session.run("coverage", "xml")
86
87
88 @nox.session(python=["3.6", "3.7", "3.8", "3.9", "3.10", "pypy"])
89 def test(session):
90 tests_impl(session)
91
92
93 @nox.session(python=["2.7"])
94 def unsupported_python2(session):
95 # Can't check both returncode and output with session.run
96 process = subprocess.run(
97 ["python", "setup.py", "install"],
98 env={**session.env},
99 text=True,
100 capture_output=True,
101 )
102 assert process.returncode == 1
103 print(process.stderr)
104 assert "Unsupported Python version" in process.stderr
105
106
107 @nox.session(python=["3"])
108 def test_brotlipy(session):
109 """Check that if 'brotlipy' is installed instead of 'brotli' or
110 'brotlicffi' that we still don't blow up.
111 """
112 session.install("brotlipy")
113 tests_impl(session, extras="socks,secure")
114
115
116 @nox.session()
117 def format(session):
118 """Run code formatters."""
119 session.install("pre-commit")
120 session.run("pre-commit", "--version")
121
122 process = subprocess.run(
123 ["pre-commit", "run", "--all-files"],
124 env=session.env,
125 text=True,
126 stdout=subprocess.PIPE,
127 stderr=subprocess.STDOUT,
128 )
129 # Ensure that pre-commit itself ran successfully
130 assert process.returncode in (0, 1)
131
132 lint(session)
133
134
135 @nox.session
136 def lint(session):
137 session.install("pre-commit")
138 session.run("pre-commit", "run", "--all-files")
139
140 mypy(session)
141
142
143 @nox.session()
144 def mypy(session):
145 """Run mypy."""
146 session.install("mypy==0.812")
147 session.install("idna>=2.0.0")
148 session.run("mypy", "--version")
149
150 session.log("mypy --strict src/urllib3")
151 all_errors, errors = [], []
152 process = subprocess.run(
153 ["mypy", "--strict", "src/urllib3"],
154 env=session.env,
155 text=True,
156 stdout=subprocess.PIPE,
157 stderr=subprocess.STDOUT,
158 )
159 # Ensure that mypy itself ran successfully
160 assert process.returncode in (0, 1)
161
162 for line in process.stdout.split("\n"):
163 all_errors.append(line)
164 filepath = line.partition(":")[0]
165 if filepath.replace(".pyi", ".py") in TYPED_FILES:
166 errors.append(line)
167 session.log(f"all errors count: {len(all_errors)}")
168 if errors:
169 session.error("\n" + "\n".join(sorted(set(errors))))
170
171
172 @nox.session
173 def docs(session):
174 session.install("-r", "docs/requirements.txt")
175 session.install(".[socks,secure,brotli]")
176
177 session.chdir("docs")
178 if os.path.exists("_build"):
179 shutil.rmtree("_build")
180 session.run("sphinx-build", "-b", "html", "-W", ".", "_build/html")
181
[end of noxfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -4,41 +4,6 @@
import nox
-# Whenever type-hints are completed on a file it should be added here so that
-# this file will continue to be checked by mypy. Errors from other files are
-# ignored.
-TYPED_FILES = {
- "src/urllib3/contrib/__init__.py",
- "src/urllib3/contrib/_securetransport/bindings.py",
- "src/urllib3/contrib/_securetransport/low_level.py",
- "src/urllib3/contrib/ntlmpool.py",
- "src/urllib3/contrib/pyopenssl.py",
- "src/urllib3/contrib/securetransport.py",
- "src/urllib3/contrib/socks.py",
- "src/urllib3/__init__.py",
- "src/urllib3/connection.py",
- "src/urllib3/connectionpool.py",
- "src/urllib3/exceptions.py",
- "src/urllib3/_collections.py",
- "src/urllib3/fields.py",
- "src/urllib3/filepost.py",
- "src/urllib3/poolmanager.py",
- "src/urllib3/request.py",
- "src/urllib3/response.py",
- "src/urllib3/util/connection.py",
- "src/urllib3/util/proxy.py",
- "src/urllib3/util/queue.py",
- "src/urllib3/util/response.py",
- "src/urllib3/util/ssl_.py",
- "src/urllib3/util/ssl_match_hostname.py",
- "src/urllib3/util/ssltransport.py",
- "src/urllib3/util/url.py",
- "src/urllib3/util/request.py",
- "src/urllib3/util/retry.py",
- "src/urllib3/util/timeout.py",
- "src/urllib3/util/util.py",
- "src/urllib3/util/wait.py",
-}
SOURCE_FILES = [
"docs/",
"dummyserver/",
@@ -146,27 +111,7 @@
session.install("mypy==0.812")
session.install("idna>=2.0.0")
session.run("mypy", "--version")
-
- session.log("mypy --strict src/urllib3")
- all_errors, errors = [], []
- process = subprocess.run(
- ["mypy", "--strict", "src/urllib3"],
- env=session.env,
- text=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- )
- # Ensure that mypy itself ran successfully
- assert process.returncode in (0, 1)
-
- for line in process.stdout.split("\n"):
- all_errors.append(line)
- filepath = line.partition(":")[0]
- if filepath.replace(".pyi", ".py") in TYPED_FILES:
- errors.append(line)
- session.log(f"all errors count: {len(all_errors)}")
- if errors:
- session.error("\n" + "\n".join(sorted(set(errors))))
+ session.run("mypy", "--strict", "src/urllib3")
@nox.session
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -4,41 +4,6 @@\n \n import nox\n \n-# Whenever type-hints are completed on a file it should be added here so that\n-# this file will continue to be checked by mypy. Errors from other files are\n-# ignored.\n-TYPED_FILES = {\n- \"src/urllib3/contrib/__init__.py\",\n- \"src/urllib3/contrib/_securetransport/bindings.py\",\n- \"src/urllib3/contrib/_securetransport/low_level.py\",\n- \"src/urllib3/contrib/ntlmpool.py\",\n- \"src/urllib3/contrib/pyopenssl.py\",\n- \"src/urllib3/contrib/securetransport.py\",\n- \"src/urllib3/contrib/socks.py\",\n- \"src/urllib3/__init__.py\",\n- \"src/urllib3/connection.py\",\n- \"src/urllib3/connectionpool.py\",\n- \"src/urllib3/exceptions.py\",\n- \"src/urllib3/_collections.py\",\n- \"src/urllib3/fields.py\",\n- \"src/urllib3/filepost.py\",\n- \"src/urllib3/poolmanager.py\",\n- \"src/urllib3/request.py\",\n- \"src/urllib3/response.py\",\n- \"src/urllib3/util/connection.py\",\n- \"src/urllib3/util/proxy.py\",\n- \"src/urllib3/util/queue.py\",\n- \"src/urllib3/util/response.py\",\n- \"src/urllib3/util/ssl_.py\",\n- \"src/urllib3/util/ssl_match_hostname.py\",\n- \"src/urllib3/util/ssltransport.py\",\n- \"src/urllib3/util/url.py\",\n- \"src/urllib3/util/request.py\",\n- \"src/urllib3/util/retry.py\",\n- \"src/urllib3/util/timeout.py\",\n- \"src/urllib3/util/util.py\",\n- \"src/urllib3/util/wait.py\",\n-}\n SOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n@@ -146,27 +111,7 @@\n session.install(\"mypy==0.812\")\n session.install(\"idna>=2.0.0\")\n session.run(\"mypy\", \"--version\")\n-\n- session.log(\"mypy --strict src/urllib3\")\n- all_errors, errors = [], []\n- process = subprocess.run(\n- [\"mypy\", \"--strict\", \"src/urllib3\"],\n- env=session.env,\n- text=True,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.STDOUT,\n- )\n- # Ensure that mypy itself ran successfully\n- assert process.returncode in (0, 1)\n-\n- for line in process.stdout.split(\"\\n\"):\n- all_errors.append(line)\n- filepath = line.partition(\":\")[0]\n- if filepath.replace(\".pyi\", \".py\") in TYPED_FILES:\n- errors.append(line)\n- session.log(f\"all errors count: {len(all_errors)}\")\n- if errors:\n- session.error(\"\\n\" + \"\\n\".join(sorted(set(errors))))\n+ session.run(\"mypy\", \"--strict\", \"src/urllib3\")\n \n \n @nox.session\n", "issue": "Start shipping our own type stubs\nTypeshed houses very rudimentary type stubs for urllib3 but they're hidden within `requests.packages.urllib3` so Mypy can't take advantage of them when using urllib3 on its own. We could also probably improve on what is already in typeshed by adding better types than `Any` everywhere.\n", "before_files": [{"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\n# Whenever type-hints are completed on a file it should be added here so that\n# this file will continue to be checked by mypy. 
Errors from other files are\n# ignored.\nTYPED_FILES = {\n \"src/urllib3/contrib/__init__.py\",\n \"src/urllib3/contrib/_securetransport/bindings.py\",\n \"src/urllib3/contrib/_securetransport/low_level.py\",\n \"src/urllib3/contrib/ntlmpool.py\",\n \"src/urllib3/contrib/pyopenssl.py\",\n \"src/urllib3/contrib/securetransport.py\",\n \"src/urllib3/contrib/socks.py\",\n \"src/urllib3/__init__.py\",\n \"src/urllib3/connection.py\",\n \"src/urllib3/connectionpool.py\",\n \"src/urllib3/exceptions.py\",\n \"src/urllib3/_collections.py\",\n \"src/urllib3/fields.py\",\n \"src/urllib3/filepost.py\",\n \"src/urllib3/poolmanager.py\",\n \"src/urllib3/request.py\",\n \"src/urllib3/response.py\",\n \"src/urllib3/util/connection.py\",\n \"src/urllib3/util/proxy.py\",\n \"src/urllib3/util/queue.py\",\n \"src/urllib3/util/response.py\",\n \"src/urllib3/util/ssl_.py\",\n \"src/urllib3/util/ssl_match_hostname.py\",\n \"src/urllib3/util/ssltransport.py\",\n \"src/urllib3/util/url.py\",\n \"src/urllib3/util/request.py\",\n \"src/urllib3/util/retry.py\",\n \"src/urllib3/util/timeout.py\",\n \"src/urllib3/util/util.py\",\n \"src/urllib3/util/wait.py\",\n}\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(session, extras=\"socks,secure,brotli\"):\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://github.com/pyca/cryptography\n # We use parallel mode and then combine here so that coverage.py will take\n # the paths like .tox/pyXY/lib/pythonX.Y/site-packages/urllib3/__init__.py\n # and collapse them into src/urllib3/__init__.py.\n\n session.run(\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n session.run(\"coverage\", \"combine\")\n session.run(\"coverage\", \"report\", \"-m\")\n session.run(\"coverage\", \"xml\")\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\", \"pypy\"])\ndef test(session):\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_python2(session):\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Unsupported Python version\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session):\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\")\n\n\[email protected]()\ndef format(session):\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that 
pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session):\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected]()\ndef mypy(session):\n \"\"\"Run mypy.\"\"\"\n session.install(\"mypy==0.812\")\n session.install(\"idna>=2.0.0\")\n session.run(\"mypy\", \"--version\")\n\n session.log(\"mypy --strict src/urllib3\")\n all_errors, errors = [], []\n process = subprocess.run(\n [\"mypy\", \"--strict\", \"src/urllib3\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that mypy itself ran successfully\n assert process.returncode in (0, 1)\n\n for line in process.stdout.split(\"\\n\"):\n all_errors.append(line)\n filepath = line.partition(\":\")[0]\n if filepath.replace(\".pyi\", \".py\") in TYPED_FILES:\n errors.append(line)\n session.log(f\"all errors count: {len(all_errors)}\")\n if errors:\n session.error(\"\\n\" + \"\\n\".join(sorted(set(errors))))\n\n\[email protected]\ndef docs(session):\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}]}
| 2,462 | 767 |
gh_patches_debug_43397
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1931
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add token to websockets connector
Currently, anyone who knows the opsdroid URL and endpoint will be able to request a socket to initialize a websocket connection.
Ideally, we should allow users to select a token in the configuration settings. When opsdroid gets a request, if the token doesn't exist then we just reject the request.
This will also be a nice feature to implement alongside opsdroid-web v2
</issue>
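A minimal sketch of the requested check, written against plain aiohttp and independent of opsdroid's connector internals (the function and parameter names are illustrative): refuse the socket when a token is configured but the client's `Authorization` header does not match.

```python
# Illustrative only -- assumes the connector keeps its configured token around.
from typing import Optional

import aiohttp.web


async def validate_request(
    request: aiohttp.web.Request, configured_token: Optional[str]
) -> bool:
    client_token = request.headers.get("Authorization")
    if configured_token and client_token != configured_token:
        # A token is required but missing or wrong: reject the request.
        raise aiohttp.web.HTTPUnauthorized()
    return True
```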
<code>
[start of opsdroid/connector/websocket/__init__.py]
1 """A connector which allows websocket connections."""
2 import json
3 import logging
4 import uuid
5 from datetime import datetime
6
7 import aiohttp
8 import aiohttp.web
9 from aiohttp import WSCloseCode
10 from opsdroid.connector import Connector, register_event
11 from opsdroid.events import Message
12
13 _LOGGER = logging.getLogger(__name__)
14 HEADERS = {"Access-Control-Allow-Origin": "*"}
15 CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
16
17
18 class ConnectorWebsocket(Connector):
19 """A connector which allows websocket connections."""
20
21 def __init__(self, config, opsdroid=None):
22 """Create the connector."""
23 super().__init__(config, opsdroid=opsdroid)
24 _LOGGER.debug(_("Starting Websocket connector."))
25 self.name = config.get("name", "websocket")
26 self.max_connections = self.config.get("max-connections", 10)
27 self.connection_timeout = self.config.get("connection-timeout", 60)
28 self.accepting_connections = True
29 self.active_connections = {}
30 self.available_connections = []
31 self.bot_name = self.config.get("bot-name", "opsdroid")
32
33 async def connect(self):
34 """Connect to the chat service."""
35 self.accepting_connections = True
36
37 self.opsdroid.web_server.web_app.router.add_get(
38 "/connector/websocket/{socket}", self.websocket_handler
39 )
40
41 self.opsdroid.web_server.web_app.router.add_post(
42 "/connector/websocket", self.new_websocket_handler
43 )
44
45 async def disconnect(self):
46 """Disconnect from current sessions."""
47 self.accepting_connections = False
48 connections_to_close = self.active_connections.copy()
49 for connection in connections_to_close:
50 await connections_to_close[connection].close(
51 code=WSCloseCode.GOING_AWAY, message="Server shutdown"
52 )
53
54 async def new_websocket_handler(self, request):
55 """Handle for aiohttp creating websocket connections."""
56 if (
57 len(self.active_connections) + len(self.available_connections)
58 < self.max_connections
59 and self.accepting_connections
60 ):
61 socket = {"id": str(uuid.uuid1()), "date": datetime.now()}
62 self.available_connections.append(socket)
63 return aiohttp.web.Response(
64 text=json.dumps({"socket": socket["id"]}), headers=HEADERS, status=200
65 )
66 return aiohttp.web.Response(
67 text=json.dumps("No connections available"), headers=HEADERS, status=429
68 )
69
70 async def websocket_handler(self, request):
71 """Handle for aiohttp handling websocket connections."""
72 socket = request.match_info.get("socket")
73 available = [
74 item for item in self.available_connections if item["id"] == socket
75 ]
76 if len(available) != 1:
77 return aiohttp.web.Response(
78 text=json.dumps("Please request a socket first"),
79 headers=HEADERS,
80 status=400,
81 )
82 if (
83 datetime.now() - available[0]["date"]
84 ).total_seconds() > self.connection_timeout:
85 self.available_connections.remove(available[0])
86 return aiohttp.web.Response(
87 text=json.dumps("Socket request timed out"), headers=HEADERS, status=408
88 )
89 self.available_connections.remove(available[0])
90 _LOGGER.debug(_("User connected to %s."), socket)
91
92 websocket = aiohttp.web.WebSocketResponse()
93 await websocket.prepare(request)
94
95 self.active_connections[socket] = websocket
96 async for msg in websocket:
97 if msg.type == aiohttp.WSMsgType.TEXT:
98 message = Message(text=msg.data, user=None, target=None, connector=self)
99 await self.opsdroid.parse(message)
100 elif msg.type == aiohttp.WSMsgType.ERROR:
101 _LOGGER.error(
102 _("Websocket connection closed with exception %s."),
103 websocket.exception(),
104 )
105
106 _LOGGER.info(_("websocket connection closed"))
107 self.active_connections.pop(socket, None)
108
109 return websocket
110
111 async def listen(self):
112 """Listen for and parse new messages.
113
114 Listening is handled by the aiohttp web server so
115 we don't need to do anything here.
116
117 """
118
119 @register_event(Message)
120 async def send_message(self, message):
121 """Respond with a message."""
122 try:
123 if message.target is None:
124 message.target = next(iter(self.active_connections))
125 _LOGGER.debug(
126 _("Responding with: '%s' in target %s"), message.text, message.target
127 )
128 await self.active_connections[message.target].send_str(message.text)
129 except KeyError:
130 _LOGGER.error(_("No active socket for target %s"), message.target)
131
[end of opsdroid/connector/websocket/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/connector/websocket/__init__.py b/opsdroid/connector/websocket/__init__.py
--- a/opsdroid/connector/websocket/__init__.py
+++ b/opsdroid/connector/websocket/__init__.py
@@ -9,12 +9,49 @@
from aiohttp import WSCloseCode
from opsdroid.connector import Connector, register_event
from opsdroid.events import Message
+import dataclasses
+from typing import Optional
_LOGGER = logging.getLogger(__name__)
HEADERS = {"Access-Control-Allow-Origin": "*"}
CONFIG_SCHEMA = {"bot-name": str, "max-connections": int, "connection-timeout": int}
[email protected]
+class WebsocketMessage:
+ """A message received from a websocket connection."""
+
+ message: str
+ user: Optional[str]
+ socket: Optional[str]
+
+ @classmethod
+ def parse_payload(cls, payload: str):
+ """Parse the payload of a websocket message.
+
+ We will try to parse the payload as a json string.
+ If that fails, we will use the default values which are:
+
+ message: str
+ user: None
+ socket: None
+
+ """
+ try:
+ data = json.loads(payload)
+ return cls(
+ message=data.get("message"),
+ user=data.get("user"),
+ socket=data.get("socket"),
+ )
+ except json.JSONDecodeError:
+ return cls(
+ message=payload,
+ user=None,
+ socket=None,
+ )
+
+
class ConnectorWebsocket(Connector):
"""A connector which allows websocket connections."""
@@ -29,6 +66,7 @@
self.active_connections = {}
self.available_connections = []
self.bot_name = self.config.get("bot-name", "opsdroid")
+ self.authorization_token = self.config.get("token")
async def connect(self):
"""Connect to the chat service."""
@@ -53,6 +91,7 @@
async def new_websocket_handler(self, request):
"""Handle for aiohttp creating websocket connections."""
+ await self.validate_request(request)
if (
len(self.active_connections) + len(self.available_connections)
< self.max_connections
@@ -95,7 +134,13 @@
self.active_connections[socket] = websocket
async for msg in websocket:
if msg.type == aiohttp.WSMsgType.TEXT:
- message = Message(text=msg.data, user=None, target=None, connector=self)
+ payload = WebsocketMessage.parse_payload(msg.data)
+ message = Message(
+ text=payload.message,
+ user=payload.user,
+ target=payload.socket,
+ connector=self,
+ )
await self.opsdroid.parse(message)
elif msg.type == aiohttp.WSMsgType.ERROR:
_LOGGER.error(
@@ -108,6 +153,20 @@
return websocket
+ async def validate_request(self, request):
+ """Validate the request by looking at headers and the connector token.
+
+ If the token does not exist in the header, but exists in the configuration,
+ then we will simply return a Forbidden error.
+
+ """
+ client_token = request.headers.get("Authorization")
+ if self.authorization_token and (
+ client_token is None or client_token != self.authorization_token
+ ):
+ raise aiohttp.web.HTTPUnauthorized()
+ return True
+
async def listen(self):
"""Listen for and parse new messages.
@@ -117,7 +176,7 @@
"""
@register_event(Message)
- async def send_message(self, message):
+ async def send_message(self, message: Message):
"""Respond with a message."""
try:
if message.target is None:
|
{"golden_diff": "diff --git a/opsdroid/connector/websocket/__init__.py b/opsdroid/connector/websocket/__init__.py\n--- a/opsdroid/connector/websocket/__init__.py\n+++ b/opsdroid/connector/websocket/__init__.py\n@@ -9,12 +9,49 @@\n from aiohttp import WSCloseCode\n from opsdroid.connector import Connector, register_event\n from opsdroid.events import Message\n+import dataclasses\n+from typing import Optional\n \n _LOGGER = logging.getLogger(__name__)\n HEADERS = {\"Access-Control-Allow-Origin\": \"*\"}\n CONFIG_SCHEMA = {\"bot-name\": str, \"max-connections\": int, \"connection-timeout\": int}\n \n \[email protected]\n+class WebsocketMessage:\n+ \"\"\"A message received from a websocket connection.\"\"\"\n+\n+ message: str\n+ user: Optional[str]\n+ socket: Optional[str]\n+\n+ @classmethod\n+ def parse_payload(cls, payload: str):\n+ \"\"\"Parse the payload of a websocket message.\n+\n+ We will try to parse the payload as a json string.\n+ If that fails, we will use the default values which are:\n+\n+ message: str\n+ user: None\n+ socket: None\n+\n+ \"\"\"\n+ try:\n+ data = json.loads(payload)\n+ return cls(\n+ message=data.get(\"message\"),\n+ user=data.get(\"user\"),\n+ socket=data.get(\"socket\"),\n+ )\n+ except json.JSONDecodeError:\n+ return cls(\n+ message=payload,\n+ user=None,\n+ socket=None,\n+ )\n+\n+\n class ConnectorWebsocket(Connector):\n \"\"\"A connector which allows websocket connections.\"\"\"\n \n@@ -29,6 +66,7 @@\n self.active_connections = {}\n self.available_connections = []\n self.bot_name = self.config.get(\"bot-name\", \"opsdroid\")\n+ self.authorization_token = self.config.get(\"token\")\n \n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n@@ -53,6 +91,7 @@\n \n async def new_websocket_handler(self, request):\n \"\"\"Handle for aiohttp creating websocket connections.\"\"\"\n+ await self.validate_request(request)\n if (\n len(self.active_connections) + len(self.available_connections)\n < self.max_connections\n@@ -95,7 +134,13 @@\n self.active_connections[socket] = websocket\n async for msg in websocket:\n if msg.type == aiohttp.WSMsgType.TEXT:\n- message = Message(text=msg.data, user=None, target=None, connector=self)\n+ payload = WebsocketMessage.parse_payload(msg.data)\n+ message = Message(\n+ text=payload.message,\n+ user=payload.user,\n+ target=payload.socket,\n+ connector=self,\n+ )\n await self.opsdroid.parse(message)\n elif msg.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\n@@ -108,6 +153,20 @@\n \n return websocket\n \n+ async def validate_request(self, request):\n+ \"\"\"Validate the request by looking at headers and the connector token.\n+\n+ If the token does not exist in the header, but exists in the configuration,\n+ then we will simply return a Forbidden error.\n+\n+ \"\"\"\n+ client_token = request.headers.get(\"Authorization\")\n+ if self.authorization_token and (\n+ client_token is None or client_token != self.authorization_token\n+ ):\n+ raise aiohttp.web.HTTPUnauthorized()\n+ return True\n+\n async def listen(self):\n \"\"\"Listen for and parse new messages.\n \n@@ -117,7 +176,7 @@\n \"\"\"\n \n @register_event(Message)\n- async def send_message(self, message):\n+ async def send_message(self, message: Message):\n \"\"\"Respond with a message.\"\"\"\n try:\n if message.target is None:\n", "issue": "Add token to websockets connector\nCurrently, anyone that knows opdroid url and endpoint will be able to request a socket to initialize a websocket connection.\r\n\r\nIdeally, we should allow users to select a token in the 
configuration settings. When opsdroid gets a request, if the token doesn't exist then we just reject the request.\r\n\r\nThis will also a nice feature to implement along side opsdroid-web v2\n", "before_files": [{"content": "\"\"\"A connector which allows websocket connections.\"\"\"\nimport json\nimport logging\nimport uuid\nfrom datetime import datetime\n\nimport aiohttp\nimport aiohttp.web\nfrom aiohttp import WSCloseCode\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message\n\n_LOGGER = logging.getLogger(__name__)\nHEADERS = {\"Access-Control-Allow-Origin\": \"*\"}\nCONFIG_SCHEMA = {\"bot-name\": str, \"max-connections\": int, \"connection-timeout\": int}\n\n\nclass ConnectorWebsocket(Connector):\n \"\"\"A connector which allows websocket connections.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Websocket connector.\"))\n self.name = config.get(\"name\", \"websocket\")\n self.max_connections = self.config.get(\"max-connections\", 10)\n self.connection_timeout = self.config.get(\"connection-timeout\", 60)\n self.accepting_connections = True\n self.active_connections = {}\n self.available_connections = []\n self.bot_name = self.config.get(\"bot-name\", \"opsdroid\")\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n self.accepting_connections = True\n\n self.opsdroid.web_server.web_app.router.add_get(\n \"/connector/websocket/{socket}\", self.websocket_handler\n )\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/websocket\", self.new_websocket_handler\n )\n\n async def disconnect(self):\n \"\"\"Disconnect from current sessions.\"\"\"\n self.accepting_connections = False\n connections_to_close = self.active_connections.copy()\n for connection in connections_to_close:\n await connections_to_close[connection].close(\n code=WSCloseCode.GOING_AWAY, message=\"Server shutdown\"\n )\n\n async def new_websocket_handler(self, request):\n \"\"\"Handle for aiohttp creating websocket connections.\"\"\"\n if (\n len(self.active_connections) + len(self.available_connections)\n < self.max_connections\n and self.accepting_connections\n ):\n socket = {\"id\": str(uuid.uuid1()), \"date\": datetime.now()}\n self.available_connections.append(socket)\n return aiohttp.web.Response(\n text=json.dumps({\"socket\": socket[\"id\"]}), headers=HEADERS, status=200\n )\n return aiohttp.web.Response(\n text=json.dumps(\"No connections available\"), headers=HEADERS, status=429\n )\n\n async def websocket_handler(self, request):\n \"\"\"Handle for aiohttp handling websocket connections.\"\"\"\n socket = request.match_info.get(\"socket\")\n available = [\n item for item in self.available_connections if item[\"id\"] == socket\n ]\n if len(available) != 1:\n return aiohttp.web.Response(\n text=json.dumps(\"Please request a socket first\"),\n headers=HEADERS,\n status=400,\n )\n if (\n datetime.now() - available[0][\"date\"]\n ).total_seconds() > self.connection_timeout:\n self.available_connections.remove(available[0])\n return aiohttp.web.Response(\n text=json.dumps(\"Socket request timed out\"), headers=HEADERS, status=408\n )\n self.available_connections.remove(available[0])\n _LOGGER.debug(_(\"User connected to %s.\"), socket)\n\n websocket = aiohttp.web.WebSocketResponse()\n await websocket.prepare(request)\n\n self.active_connections[socket] = websocket\n async for msg in websocket:\n if msg.type == 
aiohttp.WSMsgType.TEXT:\n message = Message(text=msg.data, user=None, target=None, connector=self)\n await self.opsdroid.parse(message)\n elif msg.type == aiohttp.WSMsgType.ERROR:\n _LOGGER.error(\n _(\"Websocket connection closed with exception %s.\"),\n websocket.exception(),\n )\n\n _LOGGER.info(_(\"websocket connection closed\"))\n self.active_connections.pop(socket, None)\n\n return websocket\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\n\n Listening is handled by the aiohttp web server so\n we don't need to do anything here.\n\n \"\"\"\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n try:\n if message.target is None:\n message.target = next(iter(self.active_connections))\n _LOGGER.debug(\n _(\"Responding with: '%s' in target %s\"), message.text, message.target\n )\n await self.active_connections[message.target].send_str(message.text)\n except KeyError:\n _LOGGER.error(_(\"No active socket for target %s\"), message.target)\n", "path": "opsdroid/connector/websocket/__init__.py"}]}
| 1,913 | 850 |
gh_patches_debug_33659
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-1511
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Standardize theme interface
Themes should have a static set of requirements that all themes should have. This really just means we need to provide more guidance on designing themes.
</issue>
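One concrete way to pin down such a contract (sketched here with assumed names, not CTFd's documented API) is to expose a small fixed set of Jinja globals that every theme can rely on, backed by proxies over the current session:

```python
# Sketch only -- a proxy exposed to every theme as a stable template global.
from CTFd.utils.user import get_current_user_attrs


class _CurrentUserProxy:
    def __getattr__(self, attr):
        attrs = get_current_user_attrs()  # None when nobody is logged in
        return getattr(attrs, attr, None)


def init_theme_globals(app):
    # Themes can then always reference e.g. User.name or User.email in templates.
    app.jinja_env.globals.update(User=_CurrentUserProxy())
```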
<code>
[start of CTFd/constants/teams.py]
1 from collections import namedtuple
2
3 TeamAttrs = namedtuple(
4 "TeamAttrs",
5 [
6 "id",
7 "oauth_id",
8 "name",
9 "email",
10 "secret",
11 "website",
12 "affiliation",
13 "country",
14 "bracket",
15 "hidden",
16 "banned",
17 "captain_id",
18 "created",
19 ],
20 )
21
[end of CTFd/constants/teams.py]
[start of CTFd/utils/initialization/__init__.py]
1 import datetime
2 import logging
3 import os
4 import sys
5
6 from flask import abort, redirect, render_template, request, session, url_for
7 from sqlalchemy.exc import IntegrityError, InvalidRequestError
8 from werkzeug.middleware.dispatcher import DispatcherMiddleware
9
10 from CTFd.cache import clear_user_recent_ips
11 from CTFd.exceptions import UserNotFoundException, UserTokenExpiredException
12 from CTFd.models import Tracking, db
13 from CTFd.utils import config, get_config, markdown
14 from CTFd.utils.config import (
15 can_send_mail,
16 ctf_logo,
17 ctf_name,
18 ctf_theme,
19 integrations,
20 is_setup,
21 )
22 from CTFd.utils.config.pages import get_pages
23 from CTFd.utils.countries import get_countries, lookup_country_code
24 from CTFd.utils.dates import isoformat, unix_time, unix_time_millis
25 from CTFd.utils.events import EventManager, RedisEventManager
26 from CTFd.utils.humanize.words import pluralize
27 from CTFd.utils.modes import generate_account_url, get_mode_as_word
28 from CTFd.utils.plugins import (
29 get_configurable_plugins,
30 get_registered_admin_scripts,
31 get_registered_admin_stylesheets,
32 get_registered_scripts,
33 get_registered_stylesheets,
34 )
35 from CTFd.utils.security.auth import login_user, logout_user, lookup_user_token
36 from CTFd.utils.security.csrf import generate_nonce
37 from CTFd.utils.user import (
38 authed,
39 get_current_team_attrs,
40 get_current_user_attrs,
41 get_current_user_recent_ips,
42 get_ip,
43 is_admin,
44 )
45
46
47 def init_template_filters(app):
48 app.jinja_env.filters["markdown"] = markdown
49 app.jinja_env.filters["unix_time"] = unix_time
50 app.jinja_env.filters["unix_time_millis"] = unix_time_millis
51 app.jinja_env.filters["isoformat"] = isoformat
52 app.jinja_env.filters["pluralize"] = pluralize
53
54
55 def init_template_globals(app):
56 from CTFd.constants.config import Configs
57 from CTFd.constants.plugins import Plugins
58 from CTFd.constants.sessions import Session
59 from CTFd.forms import Forms
60 from CTFd.utils.config.visibility import (
61 accounts_visible,
62 challenges_visible,
63 registration_visible,
64 scores_visible,
65 )
66
67 app.jinja_env.globals.update(config=config)
68 app.jinja_env.globals.update(get_pages=get_pages)
69 app.jinja_env.globals.update(can_send_mail=can_send_mail)
70 app.jinja_env.globals.update(get_ctf_name=ctf_name)
71 app.jinja_env.globals.update(get_ctf_logo=ctf_logo)
72 app.jinja_env.globals.update(get_ctf_theme=ctf_theme)
73 app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)
74 app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)
75 app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)
76 app.jinja_env.globals.update(
77 get_registered_admin_scripts=get_registered_admin_scripts
78 )
79 app.jinja_env.globals.update(
80 get_registered_admin_stylesheets=get_registered_admin_stylesheets
81 )
82 app.jinja_env.globals.update(get_config=get_config)
83 app.jinja_env.globals.update(generate_account_url=generate_account_url)
84 app.jinja_env.globals.update(get_countries=get_countries)
85 app.jinja_env.globals.update(lookup_country_code=lookup_country_code)
86 app.jinja_env.globals.update(accounts_visible=accounts_visible)
87 app.jinja_env.globals.update(challenges_visible=challenges_visible)
88 app.jinja_env.globals.update(registration_visible=registration_visible)
89 app.jinja_env.globals.update(scores_visible=scores_visible)
90 app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)
91 app.jinja_env.globals.update(integrations=integrations)
92 app.jinja_env.globals.update(authed=authed)
93 app.jinja_env.globals.update(is_admin=is_admin)
94 app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)
95 app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)
96 app.jinja_env.globals.update(get_ip=get_ip)
97 app.jinja_env.globals.update(Configs=Configs)
98 app.jinja_env.globals.update(Plugins=Plugins)
99 app.jinja_env.globals.update(Session=Session)
100 app.jinja_env.globals.update(Forms=Forms)
101
102
103 def init_logs(app):
104 logger_submissions = logging.getLogger("submissions")
105 logger_logins = logging.getLogger("logins")
106 logger_registrations = logging.getLogger("registrations")
107
108 logger_submissions.setLevel(logging.INFO)
109 logger_logins.setLevel(logging.INFO)
110 logger_registrations.setLevel(logging.INFO)
111
112 log_dir = app.config["LOG_FOLDER"]
113 if not os.path.exists(log_dir):
114 os.makedirs(log_dir)
115
116 logs = {
117 "submissions": os.path.join(log_dir, "submissions.log"),
118 "logins": os.path.join(log_dir, "logins.log"),
119 "registrations": os.path.join(log_dir, "registrations.log"),
120 }
121
122 try:
123 for log in logs.values():
124 if not os.path.exists(log):
125 open(log, "a").close()
126
127 submission_log = logging.handlers.RotatingFileHandler(
128 logs["submissions"], maxBytes=10485760, backupCount=5
129 )
130 login_log = logging.handlers.RotatingFileHandler(
131 logs["logins"], maxBytes=10485760, backupCount=5
132 )
133 registration_log = logging.handlers.RotatingFileHandler(
134 logs["registrations"], maxBytes=10485760, backupCount=5
135 )
136
137 logger_submissions.addHandler(submission_log)
138 logger_logins.addHandler(login_log)
139 logger_registrations.addHandler(registration_log)
140 except IOError:
141 pass
142
143 stdout = logging.StreamHandler(stream=sys.stdout)
144
145 logger_submissions.addHandler(stdout)
146 logger_logins.addHandler(stdout)
147 logger_registrations.addHandler(stdout)
148
149 logger_submissions.propagate = 0
150 logger_logins.propagate = 0
151 logger_registrations.propagate = 0
152
153
154 def init_events(app):
155 if app.config.get("CACHE_TYPE") == "redis":
156 app.events_manager = RedisEventManager()
157 elif app.config.get("CACHE_TYPE") == "filesystem":
158 app.events_manager = EventManager()
159 else:
160 app.events_manager = EventManager()
161
162
163 def init_request_processors(app):
164 @app.url_defaults
165 def inject_theme(endpoint, values):
166 if "theme" not in values and app.url_map.is_endpoint_expecting(
167 endpoint, "theme"
168 ):
169 values["theme"] = ctf_theme()
170
171 @app.before_request
172 def needs_setup():
173 if is_setup() is False:
174 if request.endpoint in (
175 "views.setup",
176 "views.integrations",
177 "views.themes",
178 ):
179 return
180 else:
181 return redirect(url_for("views.setup"))
182
183 @app.before_request
184 def tracker():
185 if request.endpoint == "views.themes":
186 return
187
188 if authed():
189 user_ips = get_current_user_recent_ips()
190 ip = get_ip()
191
192 track = None
193 if (ip not in user_ips) or (request.method != "GET"):
194 track = Tracking.query.filter_by(
195 ip=get_ip(), user_id=session["id"]
196 ).first()
197
198 if track:
199 track.date = datetime.datetime.utcnow()
200 else:
201 track = Tracking(ip=get_ip(), user_id=session["id"])
202 db.session.add(track)
203
204 if track:
205 try:
206 db.session.commit()
207 except (InvalidRequestError, IntegrityError):
208 db.session.rollback()
209 db.session.close()
210 logout_user()
211 else:
212 clear_user_recent_ips(user_id=session["id"])
213
214 @app.before_request
215 def banned():
216 if request.endpoint == "views.themes":
217 return
218
219 if authed():
220 user = get_current_user_attrs()
221 team = get_current_team_attrs()
222
223 if user and user.banned:
224 return (
225 render_template(
226 "errors/403.html", error="You have been banned from this CTF"
227 ),
228 403,
229 )
230
231 if team and team.banned:
232 return (
233 render_template(
234 "errors/403.html",
235 error="Your team has been banned from this CTF",
236 ),
237 403,
238 )
239
240 @app.before_request
241 def tokens():
242 token = request.headers.get("Authorization")
243 if token and request.content_type == "application/json":
244 try:
245 token_type, token = token.split(" ", 1)
246 user = lookup_user_token(token)
247 except UserNotFoundException:
248 abort(401)
249 except UserTokenExpiredException:
250 abort(401)
251 except Exception:
252 abort(401)
253 else:
254 login_user(user)
255
256 @app.before_request
257 def csrf():
258 try:
259 func = app.view_functions[request.endpoint]
260 except KeyError:
261 abort(404)
262 if hasattr(func, "_bypass_csrf"):
263 return
264 if request.headers.get("Authorization"):
265 return
266 if not session.get("nonce"):
267 session["nonce"] = generate_nonce()
268 if request.method not in ("GET", "HEAD", "OPTIONS", "TRACE"):
269 if request.content_type == "application/json":
270 if session["nonce"] != request.headers.get("CSRF-Token"):
271 abort(403)
272 if request.content_type != "application/json":
273 if session["nonce"] != request.form.get("nonce"):
274 abort(403)
275
276 application_root = app.config.get("APPLICATION_ROOT")
277 if application_root != "/":
278
279 @app.before_request
280 def force_subdirectory_redirect():
281 if request.path.startswith(application_root) is False:
282 return redirect(
283 application_root + request.script_root + request.full_path
284 )
285
286 app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})
287
[end of CTFd/utils/initialization/__init__.py]
[start of CTFd/constants/users.py]
1 from collections import namedtuple
2
3 UserAttrs = namedtuple(
4 "UserAttrs",
5 [
6 "id",
7 "oauth_id",
8 "name",
9 "email",
10 "type",
11 "secret",
12 "website",
13 "affiliation",
14 "country",
15 "bracket",
16 "hidden",
17 "banned",
18 "verified",
19 "team_id",
20 "created",
21 ],
22 )
23
[end of CTFd/constants/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/constants/teams.py b/CTFd/constants/teams.py
--- a/CTFd/constants/teams.py
+++ b/CTFd/constants/teams.py
@@ -18,3 +18,32 @@
"created",
],
)
+
+
+class _TeamAttrsWrapper:
+ def __getattr__(self, attr):
+ from CTFd.utils.user import get_current_team_attrs
+
+ attrs = get_current_team_attrs()
+ return getattr(attrs, attr, None)
+
+ @property
+ def place(self):
+ from CTFd.utils.user import get_current_team
+
+ team = get_current_team()
+ if team:
+ return team.place
+ return None
+
+ @property
+ def score(self):
+ from CTFd.utils.user import get_current_team
+
+ team = get_current_team()
+ if team:
+ return team.score
+ return None
+
+
+Team = _TeamAttrsWrapper()
diff --git a/CTFd/constants/users.py b/CTFd/constants/users.py
--- a/CTFd/constants/users.py
+++ b/CTFd/constants/users.py
@@ -20,3 +20,32 @@
"created",
],
)
+
+
+class _UserAttrsWrapper:
+ def __getattr__(self, attr):
+ from CTFd.utils.user import get_current_user_attrs
+
+ attrs = get_current_user_attrs()
+ return getattr(attrs, attr, None)
+
+ @property
+ def place(self):
+ from CTFd.utils.user import get_current_user
+
+ user = get_current_user()
+ if user:
+ return user.place
+ return None
+
+ @property
+ def score(self):
+ from CTFd.utils.user import get_current_user
+
+ user = get_current_user()
+ if user:
+ return user.score
+ return None
+
+
+User = _UserAttrsWrapper()
diff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py
--- a/CTFd/utils/initialization/__init__.py
+++ b/CTFd/utils/initialization/__init__.py
@@ -56,6 +56,8 @@
from CTFd.constants.config import Configs
from CTFd.constants.plugins import Plugins
from CTFd.constants.sessions import Session
+ from CTFd.constants.users import User
+ from CTFd.constants.teams import Team
from CTFd.forms import Forms
from CTFd.utils.config.visibility import (
accounts_visible,
@@ -98,6 +100,8 @@
app.jinja_env.globals.update(Plugins=Plugins)
app.jinja_env.globals.update(Session=Session)
app.jinja_env.globals.update(Forms=Forms)
+ app.jinja_env.globals.update(User=User)
+ app.jinja_env.globals.update(Team=Team)
def init_logs(app):
|
{"golden_diff": "diff --git a/CTFd/constants/teams.py b/CTFd/constants/teams.py\n--- a/CTFd/constants/teams.py\n+++ b/CTFd/constants/teams.py\n@@ -18,3 +18,32 @@\n \"created\",\n ],\n )\n+\n+\n+class _TeamAttrsWrapper:\n+ def __getattr__(self, attr):\n+ from CTFd.utils.user import get_current_team_attrs\n+\n+ attrs = get_current_team_attrs()\n+ return getattr(attrs, attr, None)\n+\n+ @property\n+ def place(self):\n+ from CTFd.utils.user import get_current_team\n+\n+ team = get_current_team()\n+ if team:\n+ return team.place\n+ return None\n+\n+ @property\n+ def score(self):\n+ from CTFd.utils.user import get_current_team\n+\n+ team = get_current_team()\n+ if team:\n+ return team.score\n+ return None\n+\n+\n+Team = _TeamAttrsWrapper()\ndiff --git a/CTFd/constants/users.py b/CTFd/constants/users.py\n--- a/CTFd/constants/users.py\n+++ b/CTFd/constants/users.py\n@@ -20,3 +20,32 @@\n \"created\",\n ],\n )\n+\n+\n+class _UserAttrsWrapper:\n+ def __getattr__(self, attr):\n+ from CTFd.utils.user import get_current_user_attrs\n+\n+ attrs = get_current_user_attrs()\n+ return getattr(attrs, attr, None)\n+\n+ @property\n+ def place(self):\n+ from CTFd.utils.user import get_current_user\n+\n+ user = get_current_user()\n+ if user:\n+ return user.place\n+ return None\n+\n+ @property\n+ def score(self):\n+ from CTFd.utils.user import get_current_user\n+\n+ user = get_current_user()\n+ if user:\n+ return user.score\n+ return None\n+\n+\n+User = _UserAttrsWrapper()\ndiff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py\n--- a/CTFd/utils/initialization/__init__.py\n+++ b/CTFd/utils/initialization/__init__.py\n@@ -56,6 +56,8 @@\n from CTFd.constants.config import Configs\n from CTFd.constants.plugins import Plugins\n from CTFd.constants.sessions import Session\n+ from CTFd.constants.users import User\n+ from CTFd.constants.teams import Team\n from CTFd.forms import Forms\n from CTFd.utils.config.visibility import (\n accounts_visible,\n@@ -98,6 +100,8 @@\n app.jinja_env.globals.update(Plugins=Plugins)\n app.jinja_env.globals.update(Session=Session)\n app.jinja_env.globals.update(Forms=Forms)\n+ app.jinja_env.globals.update(User=User)\n+ app.jinja_env.globals.update(Team=Team)\n \n \n def init_logs(app):\n", "issue": "Standardize theme interface\nThemes should have a static set of requirements that all themes should have. This really just means we need to provide more guidance on designing themes. 
\n", "before_files": [{"content": "from collections import namedtuple\n\nTeamAttrs = namedtuple(\n \"TeamAttrs\",\n [\n \"id\",\n \"oauth_id\",\n \"name\",\n \"email\",\n \"secret\",\n \"website\",\n \"affiliation\",\n \"country\",\n \"bracket\",\n \"hidden\",\n \"banned\",\n \"captain_id\",\n \"created\",\n ],\n)\n", "path": "CTFd/constants/teams.py"}, {"content": "import datetime\nimport logging\nimport os\nimport sys\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\n\nfrom CTFd.cache import clear_user_recent_ips\nfrom CTFd.exceptions import UserNotFoundException, UserTokenExpiredException\nfrom CTFd.models import Tracking, db\nfrom CTFd.utils import config, get_config, markdown\nfrom CTFd.utils.config import (\n can_send_mail,\n ctf_logo,\n ctf_name,\n ctf_theme,\n integrations,\n is_setup,\n)\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.countries import get_countries, lookup_country_code\nfrom CTFd.utils.dates import isoformat, unix_time, unix_time_millis\nfrom CTFd.utils.events import EventManager, RedisEventManager\nfrom CTFd.utils.humanize.words import pluralize\nfrom CTFd.utils.modes import generate_account_url, get_mode_as_word\nfrom CTFd.utils.plugins import (\n get_configurable_plugins,\n get_registered_admin_scripts,\n get_registered_admin_stylesheets,\n get_registered_scripts,\n get_registered_stylesheets,\n)\nfrom CTFd.utils.security.auth import login_user, logout_user, lookup_user_token\nfrom CTFd.utils.security.csrf import generate_nonce\nfrom CTFd.utils.user import (\n authed,\n get_current_team_attrs,\n get_current_user_attrs,\n get_current_user_recent_ips,\n get_ip,\n is_admin,\n)\n\n\ndef init_template_filters(app):\n app.jinja_env.filters[\"markdown\"] = markdown\n app.jinja_env.filters[\"unix_time\"] = unix_time\n app.jinja_env.filters[\"unix_time_millis\"] = unix_time_millis\n app.jinja_env.filters[\"isoformat\"] = isoformat\n app.jinja_env.filters[\"pluralize\"] = pluralize\n\n\ndef init_template_globals(app):\n from CTFd.constants.config import Configs\n from CTFd.constants.plugins import Plugins\n from CTFd.constants.sessions import Session\n from CTFd.forms import Forms\n from CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n registration_visible,\n scores_visible,\n )\n\n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n app.jinja_env.globals.update(can_send_mail=can_send_mail)\n app.jinja_env.globals.update(get_ctf_name=ctf_name)\n app.jinja_env.globals.update(get_ctf_logo=ctf_logo)\n app.jinja_env.globals.update(get_ctf_theme=ctf_theme)\n app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)\n app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)\n app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)\n app.jinja_env.globals.update(\n get_registered_admin_scripts=get_registered_admin_scripts\n )\n app.jinja_env.globals.update(\n get_registered_admin_stylesheets=get_registered_admin_stylesheets\n )\n app.jinja_env.globals.update(get_config=get_config)\n app.jinja_env.globals.update(generate_account_url=generate_account_url)\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n 
app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n app.jinja_env.globals.update(scores_visible=scores_visible)\n app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)\n app.jinja_env.globals.update(integrations=integrations)\n app.jinja_env.globals.update(authed=authed)\n app.jinja_env.globals.update(is_admin=is_admin)\n app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)\n app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)\n app.jinja_env.globals.update(get_ip=get_ip)\n app.jinja_env.globals.update(Configs=Configs)\n app.jinja_env.globals.update(Plugins=Plugins)\n app.jinja_env.globals.update(Session=Session)\n app.jinja_env.globals.update(Forms=Forms)\n\n\ndef init_logs(app):\n logger_submissions = logging.getLogger(\"submissions\")\n logger_logins = logging.getLogger(\"logins\")\n logger_registrations = logging.getLogger(\"registrations\")\n\n logger_submissions.setLevel(logging.INFO)\n logger_logins.setLevel(logging.INFO)\n logger_registrations.setLevel(logging.INFO)\n\n log_dir = app.config[\"LOG_FOLDER\"]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logs = {\n \"submissions\": os.path.join(log_dir, \"submissions.log\"),\n \"logins\": os.path.join(log_dir, \"logins.log\"),\n \"registrations\": os.path.join(log_dir, \"registrations.log\"),\n }\n\n try:\n for log in logs.values():\n if not os.path.exists(log):\n open(log, \"a\").close()\n\n submission_log = logging.handlers.RotatingFileHandler(\n logs[\"submissions\"], maxBytes=10485760, backupCount=5\n )\n login_log = logging.handlers.RotatingFileHandler(\n logs[\"logins\"], maxBytes=10485760, backupCount=5\n )\n registration_log = logging.handlers.RotatingFileHandler(\n logs[\"registrations\"], maxBytes=10485760, backupCount=5\n )\n\n logger_submissions.addHandler(submission_log)\n logger_logins.addHandler(login_log)\n logger_registrations.addHandler(registration_log)\n except IOError:\n pass\n\n stdout = logging.StreamHandler(stream=sys.stdout)\n\n logger_submissions.addHandler(stdout)\n logger_logins.addHandler(stdout)\n logger_registrations.addHandler(stdout)\n\n logger_submissions.propagate = 0\n logger_logins.propagate = 0\n logger_registrations.propagate = 0\n\n\ndef init_events(app):\n if app.config.get(\"CACHE_TYPE\") == \"redis\":\n app.events_manager = RedisEventManager()\n elif app.config.get(\"CACHE_TYPE\") == \"filesystem\":\n app.events_manager = EventManager()\n else:\n app.events_manager = EventManager()\n\n\ndef init_request_processors(app):\n @app.url_defaults\n def inject_theme(endpoint, values):\n if \"theme\" not in values and app.url_map.is_endpoint_expecting(\n endpoint, \"theme\"\n ):\n values[\"theme\"] = ctf_theme()\n\n @app.before_request\n def needs_setup():\n if is_setup() is False:\n if request.endpoint in (\n \"views.setup\",\n \"views.integrations\",\n \"views.themes\",\n ):\n return\n else:\n return redirect(url_for(\"views.setup\"))\n\n @app.before_request\n def tracker():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user_ips = get_current_user_recent_ips()\n ip = get_ip()\n\n track = None\n if (ip not in user_ips) or (request.method != \"GET\"):\n track = Tracking.query.filter_by(\n ip=get_ip(), user_id=session[\"id\"]\n ).first()\n\n if track:\n track.date = datetime.datetime.utcnow()\n else:\n track = Tracking(ip=get_ip(), user_id=session[\"id\"])\n db.session.add(track)\n\n if track:\n try:\n 
db.session.commit()\n except (InvalidRequestError, IntegrityError):\n db.session.rollback()\n db.session.close()\n logout_user()\n else:\n clear_user_recent_ips(user_id=session[\"id\"])\n\n @app.before_request\n def banned():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user = get_current_user_attrs()\n team = get_current_team_attrs()\n\n if user and user.banned:\n return (\n render_template(\n \"errors/403.html\", error=\"You have been banned from this CTF\"\n ),\n 403,\n )\n\n if team and team.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your team has been banned from this CTF\",\n ),\n 403,\n )\n\n @app.before_request\n def tokens():\n token = request.headers.get(\"Authorization\")\n if token and request.content_type == \"application/json\":\n try:\n token_type, token = token.split(\" \", 1)\n user = lookup_user_token(token)\n except UserNotFoundException:\n abort(401)\n except UserTokenExpiredException:\n abort(401)\n except Exception:\n abort(401)\n else:\n login_user(user)\n\n @app.before_request\n def csrf():\n try:\n func = app.view_functions[request.endpoint]\n except KeyError:\n abort(404)\n if hasattr(func, \"_bypass_csrf\"):\n return\n if request.headers.get(\"Authorization\"):\n return\n if not session.get(\"nonce\"):\n session[\"nonce\"] = generate_nonce()\n if request.method not in (\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"):\n if request.content_type == \"application/json\":\n if session[\"nonce\"] != request.headers.get(\"CSRF-Token\"):\n abort(403)\n if request.content_type != \"application/json\":\n if session[\"nonce\"] != request.form.get(\"nonce\"):\n abort(403)\n\n application_root = app.config.get(\"APPLICATION_ROOT\")\n if application_root != \"/\":\n\n @app.before_request\n def force_subdirectory_redirect():\n if request.path.startswith(application_root) is False:\n return redirect(\n application_root + request.script_root + request.full_path\n )\n\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})\n", "path": "CTFd/utils/initialization/__init__.py"}, {"content": "from collections import namedtuple\n\nUserAttrs = namedtuple(\n \"UserAttrs\",\n [\n \"id\",\n \"oauth_id\",\n \"name\",\n \"email\",\n \"type\",\n \"secret\",\n \"website\",\n \"affiliation\",\n \"country\",\n \"bracket\",\n \"hidden\",\n \"banned\",\n \"verified\",\n \"team_id\",\n \"created\",\n ],\n)\n", "path": "CTFd/constants/users.py"}]}
| 3,784 | 670 |
gh_patches_debug_30899
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1797
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs: DTO Factory exclude fields in nested complex types
### Summary
Need to document procedure for excluding fields from nested complex types, e.g.,
```py
class Model1:
a: int
b: str
class Model2:
c: int
d: str
class Model3:
e: Union[Model1, Model2]
config = DTOConfig(exclude={"e.0.a", "e.1.c"})
...
```
</issue>
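For illustration, a minimal sketch of how the positional index in an exclude path can be read, reusing the models from the issue above; the mapping of `0`/`1` to union members (and to the single item type of a list relationship) is stated here as an assumption for illustration, not confirmed library behaviour.

```python
from dataclasses import dataclass
from typing import Union

from litestar.dto.factory import DTOConfig


@dataclass
class Model1:
    a: int
    b: str


@dataclass
class Model2:
    c: int
    d: str


@dataclass
class Model3:
    e: Union[Model1, Model2]


# Assumption: the integer segment selects a member of the nested complex type by
# position -- a union member here, or the item type of a collection field.
config = DTOConfig(
    exclude={
        "e.0.a",  # first union member (Model1) -> drop "a"
        "e.1.c",  # second union member (Model2) -> drop "c"
    }
)
```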
<code>
[start of docs/examples/data_transfer_objects/factory/excluding_fields.py]
1 from datetime import datetime
2 from uuid import UUID
3
4 from sqlalchemy import ForeignKey
5 from sqlalchemy.orm import Mapped, mapped_column, relationship
6 from typing_extensions import Annotated
7
8 from litestar import Litestar, post
9 from litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO
10 from litestar.dto.factory import DTOConfig, dto_field
11
12 from .my_lib import Base
13
14
15 class Address(Base):
16 street: Mapped[str]
17 city: Mapped[str]
18 state: Mapped[str]
19 zip: Mapped[str]
20
21
22 class User(Base):
23 name: Mapped[str]
24 password: Mapped[str] = mapped_column(info=dto_field("private"))
25 created_at: Mapped[datetime] = mapped_column(info=dto_field("read-only"))
26 address_id: Mapped[UUID] = mapped_column(ForeignKey("address.id"), info=dto_field("private"))
27 address: Mapped[Address] = relationship(info=dto_field("read-only"))
28
29
30 UserDTO = SQLAlchemyDTO[User]
31 config = DTOConfig(exclude={"id", "address.id", "address.street"})
32 ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]
33
34
35 @post("/users", dto=UserDTO, return_dto=ReadUserDTO, sync_to_thread=False)
36 def create_user(data: User) -> User:
37 data.created_at = datetime.min
38 data.address = Address(street="123 Main St", city="Anytown", state="NY", zip="12345")
39 return data
40
41
42 app = Litestar(route_handlers=[create_user])
43
44 # run: /users -H "Content-Type: application/json" -d '{"name":"Litestar User","password":"xyz","created_at":"2023-04-24T00:00:00Z"}'
45
[end of docs/examples/data_transfer_objects/factory/excluding_fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/examples/data_transfer_objects/factory/excluding_fields.py b/docs/examples/data_transfer_objects/factory/excluding_fields.py
--- a/docs/examples/data_transfer_objects/factory/excluding_fields.py
+++ b/docs/examples/data_transfer_objects/factory/excluding_fields.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from typing import List
from uuid import UUID
from sqlalchemy import ForeignKey
@@ -19,16 +20,30 @@
zip: Mapped[str]
+class Pets(Base):
+ name: Mapped[str]
+ user_id: Mapped[UUID] = mapped_column(ForeignKey("user.id"))
+
+
class User(Base):
name: Mapped[str]
password: Mapped[str] = mapped_column(info=dto_field("private"))
created_at: Mapped[datetime] = mapped_column(info=dto_field("read-only"))
address_id: Mapped[UUID] = mapped_column(ForeignKey("address.id"), info=dto_field("private"))
address: Mapped[Address] = relationship(info=dto_field("read-only"))
+ pets: Mapped[List[Pets]] = relationship(info=dto_field("read-only"))
UserDTO = SQLAlchemyDTO[User]
-config = DTOConfig(exclude={"id", "address.id", "address.street"})
+config = DTOConfig(
+ exclude={
+ "id",
+ "address.id",
+ "address.street",
+ "pets.0.id",
+ "pets.0.user_id",
+ }
+)
ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]
@@ -36,6 +51,7 @@
def create_user(data: User) -> User:
data.created_at = datetime.min
data.address = Address(street="123 Main St", city="Anytown", state="NY", zip="12345")
+ data.pets = [Pets(id=1, name="Fido"), Pets(id=2, name="Spot")]
return data
|
{"golden_diff": "diff --git a/docs/examples/data_transfer_objects/factory/excluding_fields.py b/docs/examples/data_transfer_objects/factory/excluding_fields.py\n--- a/docs/examples/data_transfer_objects/factory/excluding_fields.py\n+++ b/docs/examples/data_transfer_objects/factory/excluding_fields.py\n@@ -1,4 +1,5 @@\n from datetime import datetime\n+from typing import List\n from uuid import UUID\n \n from sqlalchemy import ForeignKey\n@@ -19,16 +20,30 @@\n zip: Mapped[str]\n \n \n+class Pets(Base):\n+ name: Mapped[str]\n+ user_id: Mapped[UUID] = mapped_column(ForeignKey(\"user.id\"))\n+\n+\n class User(Base):\n name: Mapped[str]\n password: Mapped[str] = mapped_column(info=dto_field(\"private\"))\n created_at: Mapped[datetime] = mapped_column(info=dto_field(\"read-only\"))\n address_id: Mapped[UUID] = mapped_column(ForeignKey(\"address.id\"), info=dto_field(\"private\"))\n address: Mapped[Address] = relationship(info=dto_field(\"read-only\"))\n+ pets: Mapped[List[Pets]] = relationship(info=dto_field(\"read-only\"))\n \n \n UserDTO = SQLAlchemyDTO[User]\n-config = DTOConfig(exclude={\"id\", \"address.id\", \"address.street\"})\n+config = DTOConfig(\n+ exclude={\n+ \"id\",\n+ \"address.id\",\n+ \"address.street\",\n+ \"pets.0.id\",\n+ \"pets.0.user_id\",\n+ }\n+)\n ReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]\n \n \n@@ -36,6 +51,7 @@\n def create_user(data: User) -> User:\n data.created_at = datetime.min\n data.address = Address(street=\"123 Main St\", city=\"Anytown\", state=\"NY\", zip=\"12345\")\n+ data.pets = [Pets(id=1, name=\"Fido\"), Pets(id=2, name=\"Spot\")]\n return data\n", "issue": "Docs: DTO Factory exclude fields in nested complex types\n### Summary\n\nNeed to document procedure for excluding fields from nested complex types, e.g.,\r\n\r\n```py\r\nclass Model1:\r\n a: int\r\n b: str\r\n\r\nclass Model2:\r\n c: int\r\n d: str\r\n\r\nclass Model3:\r\n e: Union[Model1, Model2]\r\n\r\nconfig = DTOConfig(exclude={\"e.0.a\", \"e.1.c\"})\r\n...\r\n```\n", "before_files": [{"content": "from datetime import datetime\nfrom uuid import UUID\n\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.orm import Mapped, mapped_column, relationship\nfrom typing_extensions import Annotated\n\nfrom litestar import Litestar, post\nfrom litestar.contrib.sqlalchemy.dto import SQLAlchemyDTO\nfrom litestar.dto.factory import DTOConfig, dto_field\n\nfrom .my_lib import Base\n\n\nclass Address(Base):\n street: Mapped[str]\n city: Mapped[str]\n state: Mapped[str]\n zip: Mapped[str]\n\n\nclass User(Base):\n name: Mapped[str]\n password: Mapped[str] = mapped_column(info=dto_field(\"private\"))\n created_at: Mapped[datetime] = mapped_column(info=dto_field(\"read-only\"))\n address_id: Mapped[UUID] = mapped_column(ForeignKey(\"address.id\"), info=dto_field(\"private\"))\n address: Mapped[Address] = relationship(info=dto_field(\"read-only\"))\n\n\nUserDTO = SQLAlchemyDTO[User]\nconfig = DTOConfig(exclude={\"id\", \"address.id\", \"address.street\"})\nReadUserDTO = SQLAlchemyDTO[Annotated[User, config]]\n\n\n@post(\"/users\", dto=UserDTO, return_dto=ReadUserDTO, sync_to_thread=False)\ndef create_user(data: User) -> User:\n data.created_at = datetime.min\n data.address = Address(street=\"123 Main St\", city=\"Anytown\", state=\"NY\", zip=\"12345\")\n return data\n\n\napp = Litestar(route_handlers=[create_user])\n\n# run: /users -H \"Content-Type: application/json\" -d '{\"name\":\"Litestar User\",\"password\":\"xyz\",\"created_at\":\"2023-04-24T00:00:00Z\"}'\n", "path": 
"docs/examples/data_transfer_objects/factory/excluding_fields.py"}]}
| 1,113 | 437 |
gh_patches_debug_4423
|
rasdani/github-patches
|
git_diff
|
elastic__ecs-1483
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove `ignore_above` setting when not indexing field
Certain ECS fields are type `keyword` but are also not:
* searchable (`index: false`)
* used in aggregations, sorting, or scripting (`doc_values: false`)
The ECS tooling's behavior is to set `ignore_above: 1024` for all `keyword` fields.
However, `ignore_above` should not be present when `index: false` and `doc_values: false` are set on a field.
</issue>
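For illustration, a small sketch of the rule being requested, written against a plain dict rather than the actual ECS generator code; the function name and the dict shape are assumptions made only for this example.

```python
def apply_keyword_defaults(field_details: dict) -> dict:
    """Sketch of the requested rule: only default ignore_above when the
    keyword field is actually indexed (names and shape are assumed)."""
    out = dict(field_details)
    if out.get("type") == "keyword" and out.get("index", True):
        out.setdefault("ignore_above", 1024)
    return out


# Indexed keyword field: ignore_above still applies.
assert apply_keyword_defaults({"type": "keyword"})["ignore_above"] == 1024

# Non-indexed, non-aggregatable keyword field: ignore_above must not appear.
assert "ignore_above" not in apply_keyword_defaults(
    {"type": "keyword", "index": False, "doc_values": False}
)
```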
<code>
[start of scripts/schema/cleaner.py]
1 import copy
2
3 from generators import ecs_helpers
4 from schema import visitor
5
6 # This script performs a few cleanup functions in place, within the deeply nested
7 # 'fields' structure passed to `clean(fields)`.
8 #
9 # What happens here:
10 #
11 # - check that mandatory attributes are present, without which we can't do much.
12 # - cleans things up, like stripping spaces, sorting arrays
13 # - makes lots of defaults explicit
14 # - pre-calculate a few additional helpful fields
15 # - converts shorthands into full representation (e.g. reuse locations)
16 #
17 # This script only deals with field sets themselves and the fields defined
18 # inside them. It doesn't perform field reuse, and therefore doesn't
19 # deal with final field names either.
20
21
22 def clean(fields, strict=False):
23 global strict_mode
24 strict_mode = strict
25 visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
26
27
28 # Schema level cleanup
29
30
31 def schema_cleanup(schema):
32 # Sanity check first
33 schema_mandatory_attributes(schema)
34 # trailing space cleanup
35 ecs_helpers.dict_clean_string_values(schema['schema_details'])
36 ecs_helpers.dict_clean_string_values(schema['field_details'])
37 # Some defaults
38 schema['schema_details'].setdefault('group', 2)
39 schema['schema_details'].setdefault('root', False)
40 schema['field_details'].setdefault('type', 'group')
41 schema['field_details'].setdefault('short', schema['field_details']['description'])
42 if 'reusable' in schema['schema_details']:
43 # order to perform chained reuses. Set to 1 if it needs to happen earlier.
44 schema['schema_details']['reusable'].setdefault('order', 2)
45 # Precalculate stuff. Those can't be set in the YAML.
46 if schema['schema_details']['root']:
47 schema['schema_details']['prefix'] = ''
48 else:
49 schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'
50 normalize_reuse_notation(schema)
51 # Final validity check if in strict mode
52 schema_assertions_and_warnings(schema)
53
54
55 SCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']
56
57
58 def schema_mandatory_attributes(schema):
59 """Ensures for the presence of the mandatory schema attributes and raises if any are missing"""
60 current_schema_attributes = sorted(list(schema['field_details'].keys()) +
61 list(schema['schema_details'].keys()))
62 missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)
63 if len(missing_attributes) > 0:
64 msg = "Schema {} is missing the following mandatory attributes: {}.\nFound these: {}".format(
65 schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)
66 raise ValueError(msg)
67 if 'reusable' in schema['schema_details']:
68 reuse_attributes = sorted(schema['schema_details']['reusable'].keys())
69 missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)
70 if len(missing_reuse_attributes) > 0:
71 msg = "Reusable schema {} is missing the following reuse attributes: {}.\nFound these: {}".format(
72 schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)
73 raise ValueError(msg)
74
75
76 def schema_assertions_and_warnings(schema):
77 """Additional checks on a fleshed out schema"""
78 single_line_short_description(schema, strict=strict_mode)
79 if 'beta' in schema['field_details']:
80 single_line_beta_description(schema, strict=strict_mode)
81 if 'reusable' in schema['schema_details']:
82 single_line_short_override_description(schema, strict=strict_mode)
83
84
85 def normalize_reuse_notation(schema):
86 """
87 Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.
88
89 When marking "user" as reusable under "destination" with the shorthand entry
90 `- destination`, this is expanded to the complete entry
91 `- { "at": "destination", "as": "user" }`.
92 The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.
93
94 The dictionary notation enables nesting a field set as a different name.
95 An example is nesting "process" fields to capture parent process details
96 at `process.parent.*`.
97 The dictionary notation `- { "at": "process", "as": "parent" }` will yield
98 fields such as `process.parent.pid`.
99 """
100 if 'reusable' not in schema['schema_details']:
101 return
102 schema_name = schema['field_details']['name']
103 reuse_entries = []
104 for reuse_entry in schema['schema_details']['reusable']['expected']:
105 if type(reuse_entry) is dict: # Already explicit
106 if 'at' in reuse_entry and 'as' in reuse_entry:
107 explicit_entry = reuse_entry
108 else:
109 raise ValueError("When specifying reusable expected locations for {} " +
110 "with the dictionary notation, keys 'as' and 'at' are required. " +
111 "Got {}.".format(schema_name, reuse_entry))
112 else: # Make it explicit
113 explicit_entry = {'at': reuse_entry, 'as': schema_name}
114 explicit_entry['full'] = explicit_entry['at'] + '.' + explicit_entry['as']
115 reuse_entries.append(explicit_entry)
116 schema['schema_details']['reusable']['expected'] = reuse_entries
117
118
119 # Field level cleanup
120
121
122 def field_cleanup(field):
123 field_mandatory_attributes(field)
124 if ecs_helpers.is_intermediate(field):
125 return
126 ecs_helpers.dict_clean_string_values(field['field_details'])
127 if 'allowed_values' in field['field_details']:
128 for allowed_value in field['field_details']['allowed_values']:
129 ecs_helpers.dict_clean_string_values(allowed_value)
130 field_defaults(field)
131 field_assertions_and_warnings(field)
132
133
134 def field_defaults(field):
135 field['field_details'].setdefault('short', field['field_details']['description'])
136 field['field_details'].setdefault('normalize', [])
137 field_or_multi_field_datatype_defaults(field['field_details'])
138 if 'multi_fields' in field['field_details']:
139 for mf in field['field_details']['multi_fields']:
140 field_or_multi_field_datatype_defaults(mf)
141 if 'name' not in mf:
142 mf['name'] = mf['type']
143
144
145 def field_or_multi_field_datatype_defaults(field_details):
146 """Sets datatype-related defaults on a canonical field or multi-field entries."""
147 if field_details['type'] == 'keyword':
148 field_details.setdefault('ignore_above', 1024)
149 if field_details['type'] == 'text':
150 field_details.setdefault('norms', False)
151 # wildcard needs the index param stripped
152 if field_details['type'] == 'wildcard':
153 field_details.pop('index', None)
154 if 'index' in field_details and not field_details['index']:
155 field_details.setdefault('doc_values', False)
156
157
158 FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']
159 ACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']
160
161
162 def field_mandatory_attributes(field):
163 """Ensures for the presence of the mandatory field attributes and raises if any are missing"""
164 if ecs_helpers.is_intermediate(field):
165 return
166 current_field_attributes = sorted(field['field_details'].keys())
167 missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)
168
169 # `alias` fields require a target `path` attribute.
170 if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:
171 missing_attributes.append('path')
172 # `scaled_float` fields require a `scaling_factor` attribute.
173 if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:
174 missing_attributes.append('scaling_factor')
175
176 if len(missing_attributes) > 0:
177 msg = "Field is missing the following mandatory attributes: {}.\nFound these: {}.\nField details: {}"
178 raise ValueError(msg.format(', '.join(missing_attributes),
179 current_field_attributes, field))
180
181
182 def field_assertions_and_warnings(field):
183 """Additional checks on a fleshed out field"""
184 if not ecs_helpers.is_intermediate(field):
185 # check short description length if in strict mode
186 single_line_short_description(field, strict=strict_mode)
187 check_example_value(field, strict=strict_mode)
188 if 'beta' in field['field_details']:
189 single_line_beta_description(field, strict=strict_mode)
190 if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:
191 msg = "Invalid level for field '{}'.\nValue: {}\nAcceptable values: {}".format(
192 field['field_details']['name'], field['field_details']['level'],
193 ACCEPTABLE_FIELD_LEVELS)
194 raise ValueError(msg)
195
196 # Common
197
198
199 SHORT_LIMIT = 120
200
201
202 def single_line_short_check(short_to_check, short_name):
203 short_length = len(short_to_check)
204 if "\n" in short_to_check or short_length > SHORT_LIMIT:
205 msg = "Short descriptions must be single line, and under {} characters (current length: {}).\n".format(
206 SHORT_LIMIT, short_length)
207 msg += "Offending field or field set: {}\nShort description:\n {}".format(
208 short_name,
209 short_to_check)
210 return msg
211 return None
212
213
214 def single_line_short_description(schema_or_field, strict=True):
215 error = single_line_short_check(schema_or_field['field_details']['short'], schema_or_field['field_details']['name'])
216 if error:
217 if strict:
218 raise ValueError(error)
219 else:
220 ecs_helpers.strict_warning(error)
221
222
223 def single_line_short_override_description(schema_or_field, strict=True):
224 for field in schema_or_field['schema_details']['reusable']['expected']:
225 if not 'short_override' in field:
226 continue
227 error = single_line_short_check(field['short_override'], field['full'])
228 if error:
229 if strict:
230 raise ValueError(error)
231 else:
232 ecs_helpers.strict_warning(error)
233
234
235 def check_example_value(field, strict=True):
236 """
237 Checks if value of the example field is of type list or dict.
238 Fails or warns (depending on strict mode) if so.
239 """
240 example_value = field['field_details'].get('example', None)
241 if isinstance(example_value, (list, dict)):
242 name = field['field_details']['name']
243 msg = f"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation."
244 if strict:
245 raise ValueError(msg)
246 else:
247 ecs_helpers.strict_warning(msg)
248
249
250 def single_line_beta_description(schema_or_field, strict=True):
251 if "\n" in schema_or_field['field_details']['beta']:
252 msg = "Beta descriptions must be single line.\n"
253 msg += f"Offending field or field set: {schema_or_field['field_details']['name']}"
254 if strict:
255 raise ValueError(msg)
256 else:
257 ecs_helpers.strict_warning(msg)
258
[end of scripts/schema/cleaner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py
--- a/scripts/schema/cleaner.py
+++ b/scripts/schema/cleaner.py
@@ -153,6 +153,7 @@
field_details.pop('index', None)
if 'index' in field_details and not field_details['index']:
field_details.setdefault('doc_values', False)
+ field_details.pop('ignore_above', None)
FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']
|
{"golden_diff": "diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py\n--- a/scripts/schema/cleaner.py\n+++ b/scripts/schema/cleaner.py\n@@ -153,6 +153,7 @@\n field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n+ field_details.pop('ignore_above', None)\n \n \n FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\n", "issue": "Remove `ignore_above` setting when not indexing field\nCertain ECS fields are type `keyword` but are also not:\r\n\r\n* searchable (`index: false`)\r\n* used in aggregations, sorting, or scripting (`doc_values: false`)\r\n\r\nThe ECS tooling's behavior is to set `ignore_above: 1024` for all `keyword` fields.\r\n\r\nHowever, `ignore_above` should not be present when `index: false` and `doc_values: false` are set on a field.\r\n\n", "before_files": [{"content": "import copy\n\nfrom generators import ecs_helpers\nfrom schema import visitor\n\n# This script performs a few cleanup functions in place, within the deeply nested\n# 'fields' structure passed to `clean(fields)`.\n#\n# What happens here:\n#\n# - check that mandatory attributes are present, without which we can't do much.\n# - cleans things up, like stripping spaces, sorting arrays\n# - makes lots of defaults explicit\n# - pre-calculate a few additional helpful fields\n# - converts shorthands into full representation (e.g. reuse locations)\n#\n# This script only deals with field sets themselves and the fields defined\n# inside them. It doesn't perform field reuse, and therefore doesn't\n# deal with final field names either.\n\n\ndef clean(fields, strict=False):\n global strict_mode\n strict_mode = strict\n visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)\n\n\n# Schema level cleanup\n\n\ndef schema_cleanup(schema):\n # Sanity check first\n schema_mandatory_attributes(schema)\n # trailing space cleanup\n ecs_helpers.dict_clean_string_values(schema['schema_details'])\n ecs_helpers.dict_clean_string_values(schema['field_details'])\n # Some defaults\n schema['schema_details'].setdefault('group', 2)\n schema['schema_details'].setdefault('root', False)\n schema['field_details'].setdefault('type', 'group')\n schema['field_details'].setdefault('short', schema['field_details']['description'])\n if 'reusable' in schema['schema_details']:\n # order to perform chained reuses. Set to 1 if it needs to happen earlier.\n schema['schema_details']['reusable'].setdefault('order', 2)\n # Precalculate stuff. 
Those can't be set in the YAML.\n if schema['schema_details']['root']:\n schema['schema_details']['prefix'] = ''\n else:\n schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'\n normalize_reuse_notation(schema)\n # Final validity check if in strict mode\n schema_assertions_and_warnings(schema)\n\n\nSCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']\n\n\ndef schema_mandatory_attributes(schema):\n \"\"\"Ensures for the presence of the mandatory schema attributes and raises if any are missing\"\"\"\n current_schema_attributes = sorted(list(schema['field_details'].keys()) +\n list(schema['schema_details'].keys()))\n missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)\n if len(missing_attributes) > 0:\n msg = \"Schema {} is missing the following mandatory attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)\n raise ValueError(msg)\n if 'reusable' in schema['schema_details']:\n reuse_attributes = sorted(schema['schema_details']['reusable'].keys())\n missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)\n if len(missing_reuse_attributes) > 0:\n msg = \"Reusable schema {} is missing the following reuse attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)\n raise ValueError(msg)\n\n\ndef schema_assertions_and_warnings(schema):\n \"\"\"Additional checks on a fleshed out schema\"\"\"\n single_line_short_description(schema, strict=strict_mode)\n if 'beta' in schema['field_details']:\n single_line_beta_description(schema, strict=strict_mode)\n if 'reusable' in schema['schema_details']:\n single_line_short_override_description(schema, strict=strict_mode)\n\n\ndef normalize_reuse_notation(schema):\n \"\"\"\n Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.\n\n When marking \"user\" as reusable under \"destination\" with the shorthand entry\n `- destination`, this is expanded to the complete entry\n `- { \"at\": \"destination\", \"as\": \"user\" }`.\n The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.\n\n The dictionary notation enables nesting a field set as a different name.\n An example is nesting \"process\" fields to capture parent process details\n at `process.parent.*`.\n The dictionary notation `- { \"at\": \"process\", \"as\": \"parent\" }` will yield\n fields such as `process.parent.pid`.\n \"\"\"\n if 'reusable' not in schema['schema_details']:\n return\n schema_name = schema['field_details']['name']\n reuse_entries = []\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n if type(reuse_entry) is dict: # Already explicit\n if 'at' in reuse_entry and 'as' in reuse_entry:\n explicit_entry = reuse_entry\n else:\n raise ValueError(\"When specifying reusable expected locations for {} \" +\n \"with the dictionary notation, keys 'as' and 'at' are required. \" +\n \"Got {}.\".format(schema_name, reuse_entry))\n else: # Make it explicit\n explicit_entry = {'at': reuse_entry, 'as': schema_name}\n explicit_entry['full'] = explicit_entry['at'] + '.' 
+ explicit_entry['as']\n reuse_entries.append(explicit_entry)\n schema['schema_details']['reusable']['expected'] = reuse_entries\n\n\n# Field level cleanup\n\n\ndef field_cleanup(field):\n field_mandatory_attributes(field)\n if ecs_helpers.is_intermediate(field):\n return\n ecs_helpers.dict_clean_string_values(field['field_details'])\n if 'allowed_values' in field['field_details']:\n for allowed_value in field['field_details']['allowed_values']:\n ecs_helpers.dict_clean_string_values(allowed_value)\n field_defaults(field)\n field_assertions_and_warnings(field)\n\n\ndef field_defaults(field):\n field['field_details'].setdefault('short', field['field_details']['description'])\n field['field_details'].setdefault('normalize', [])\n field_or_multi_field_datatype_defaults(field['field_details'])\n if 'multi_fields' in field['field_details']:\n for mf in field['field_details']['multi_fields']:\n field_or_multi_field_datatype_defaults(mf)\n if 'name' not in mf:\n mf['name'] = mf['type']\n\n\ndef field_or_multi_field_datatype_defaults(field_details):\n \"\"\"Sets datatype-related defaults on a canonical field or multi-field entries.\"\"\"\n if field_details['type'] == 'keyword':\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n # wildcard needs the index param stripped\n if field_details['type'] == 'wildcard':\n field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n\n\nFIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\nACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']\n\n\ndef field_mandatory_attributes(field):\n \"\"\"Ensures for the presence of the mandatory field attributes and raises if any are missing\"\"\"\n if ecs_helpers.is_intermediate(field):\n return\n current_field_attributes = sorted(field['field_details'].keys())\n missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)\n\n # `alias` fields require a target `path` attribute.\n if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:\n missing_attributes.append('path')\n # `scaled_float` fields require a `scaling_factor` attribute.\n if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:\n missing_attributes.append('scaling_factor')\n\n if len(missing_attributes) > 0:\n msg = \"Field is missing the following mandatory attributes: {}.\\nFound these: {}.\\nField details: {}\"\n raise ValueError(msg.format(', '.join(missing_attributes),\n current_field_attributes, field))\n\n\ndef field_assertions_and_warnings(field):\n \"\"\"Additional checks on a fleshed out field\"\"\"\n if not ecs_helpers.is_intermediate(field):\n # check short description length if in strict mode\n single_line_short_description(field, strict=strict_mode)\n check_example_value(field, strict=strict_mode)\n if 'beta' in field['field_details']:\n single_line_beta_description(field, strict=strict_mode)\n if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:\n msg = \"Invalid level for field '{}'.\\nValue: {}\\nAcceptable values: {}\".format(\n field['field_details']['name'], field['field_details']['level'],\n ACCEPTABLE_FIELD_LEVELS)\n raise ValueError(msg)\n\n# Common\n\n\nSHORT_LIMIT = 120\n\n\ndef single_line_short_check(short_to_check, short_name):\n short_length = len(short_to_check)\n if \"\\n\" in short_to_check or 
short_length > SHORT_LIMIT:\n msg = \"Short descriptions must be single line, and under {} characters (current length: {}).\\n\".format(\n SHORT_LIMIT, short_length)\n msg += \"Offending field or field set: {}\\nShort description:\\n {}\".format(\n short_name,\n short_to_check)\n return msg\n return None\n\n\ndef single_line_short_description(schema_or_field, strict=True):\n error = single_line_short_check(schema_or_field['field_details']['short'], schema_or_field['field_details']['name'])\n if error:\n if strict:\n raise ValueError(error)\n else:\n ecs_helpers.strict_warning(error)\n\n\ndef single_line_short_override_description(schema_or_field, strict=True):\n for field in schema_or_field['schema_details']['reusable']['expected']:\n if not 'short_override' in field:\n continue\n error = single_line_short_check(field['short_override'], field['full'])\n if error:\n if strict:\n raise ValueError(error)\n else:\n ecs_helpers.strict_warning(error)\n\n\ndef check_example_value(field, strict=True):\n \"\"\"\n Checks if value of the example field is of type list or dict.\n Fails or warns (depending on strict mode) if so.\n \"\"\"\n example_value = field['field_details'].get('example', None)\n if isinstance(example_value, (list, dict)):\n name = field['field_details']['name']\n msg = f\"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation.\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n\n\ndef single_line_beta_description(schema_or_field, strict=True):\n if \"\\n\" in schema_or_field['field_details']['beta']:\n msg = \"Beta descriptions must be single line.\\n\"\n msg += f\"Offending field or field set: {schema_or_field['field_details']['name']}\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n", "path": "scripts/schema/cleaner.py"}]}
| 3,691 | 114 |
gh_patches_debug_34308
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__aimmo-444
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix aimmo view tests
The new PR on branch `fix_staging` is breaking some tests.
</issue>
<code>
[start of players/views.py]
1 import cPickle as pickle
2 import logging
3 import os
4
5 from django.contrib.auth.decorators import login_required
6 from django.core.exceptions import ValidationError
7 from django.core.urlresolvers import reverse
8 from django.http import HttpResponse, Http404
9 from django.http import JsonResponse
10 from django.shortcuts import redirect, render, get_object_or_404
11 from django.views.decorators.csrf import csrf_exempt
12 from django.views.decorators.http import require_http_methods
13 from django.views.generic import TemplateView
14
15 from models import Avatar, Game, LevelAttempt
16 from players import forms
17 from . import app_settings
18
19 LOGGER = logging.getLogger(__name__)
20
21
22 def _post_code_success_response(message):
23 return _create_response("SUCCESS", message)
24
25
26 def _create_response(status, message):
27 response = {
28 "status": status,
29 "message": message
30 }
31 return JsonResponse(response)
32
33
34 @login_required
35 def code(request, id):
36 game = get_object_or_404(Game, id=id)
37 if not game.can_user_play(request.user):
38 raise Http404
39 try:
40 avatar = game.avatar_set.get(owner=request.user)
41 except Avatar.DoesNotExist:
42 initial_code_file_name = os.path.join(
43 os.path.abspath(os.path.dirname(__file__)),
44 'avatar_examples/dumb_avatar.py',
45 )
46 with open(initial_code_file_name) as initial_code_file:
47 initial_code = initial_code_file.read()
48 avatar = Avatar.objects.create(owner=request.user, code=initial_code,
49 game_id=id)
50 if request.method == 'POST':
51 avatar.code = request.POST['code']
52 avatar.save()
53 return _post_code_success_response('Your code was saved!<br><br><a href="%s">Watch</a>' % reverse('aimmo/watch', kwargs={'id': game.id}))
54 else:
55 return HttpResponse(avatar.code)
56
57
58 def list_games(request):
59 response = {
60 game.pk:
61 {
62 'name': game.name,
63 'settings': pickle.dumps(game.settings_as_dict()),
64 } for game in Game.objects.exclude_inactive()
65 }
66 return JsonResponse(response)
67
68
69 def get_game(request, id):
70 game = get_object_or_404(Game, id=id)
71 response = {
72 'main': {
73 'parameters': [],
74 'main_avatar': None,
75 'users': [],
76 }
77 }
78 for avatar in game.avatar_set.all():
79 if avatar.owner_id == game.main_user_id:
80 response['main']['main_avatar'] = avatar.owner_id
81 response['main']['users'].append({
82 'id': avatar.owner_id,
83 'code': avatar.code,
84 })
85 return JsonResponse(response)
86
87
88 @csrf_exempt
89 @require_http_methods(['POST'])
90 def mark_game_complete(request, id):
91 game = get_object_or_404(Game, id=id)
92 game.completed = True
93 game.static_data = request.body
94 game.save()
95 return HttpResponse('Done!')
96
97
98 class ProgramView(TemplateView):
99 template_name = 'players/program.html'
100
101 def get_context_data(self, **kwargs):
102 context = super(ProgramView, self).get_context_data(**kwargs)
103 game = get_object_or_404(Game, id=self.kwargs['id'])
104 if not game.can_user_play(self.request.user):
105 raise Http404
106 context['game_id'] = int(self.kwargs['id'])
107 return context
108
109
110 def program_level(request, num):
111 try:
112 game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)
113 except Game.DoesNotExist:
114 LOGGER.debug('Adding level')
115 game = _add_and_return_level(num, request.user)
116 LOGGER.debug('Programming game with id %s', game.id)
117 return render(request, 'players/program.html', {'game_id': game.id})
118
119
120 def _render_game(request, game):
121 context = {
122 'current_user_player_key': request.user.pk,
123 'active': game.is_active,
124 'static_data': game.static_data or '{}',
125 }
126 context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)
127 context['game_id'] = game.id
128 return render(request, 'players/viewer.html', context)
129
130
131 def watch_game(request, id):
132 game = get_object_or_404(Game, id=id)
133 if not game.can_user_play(request.user):
134 raise Http404
135 return _render_game(request, game)
136
137
138 def watch_level(request, num):
139 try:
140 game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)
141 except Game.DoesNotExist:
142 LOGGER.debug('Adding level')
143 game = _add_and_return_level(num, request.user)
144 LOGGER.debug('Displaying game with id %s', game.id)
145 return _render_game(request, game)
146
147
148 def _add_and_return_level(num, user):
149 game = Game(generator='Level'+num, name='Level '+num, public=False, main_user=user)
150 try:
151 game.save()
152 except ValidationError as e:
153 LOGGER.warn(e)
154 raise Http404
155 game.can_play = [user]
156 game.save()
157 level_attempt = LevelAttempt(game=game, user=user, level_number=num)
158 level_attempt.save()
159 return game
160
161
162 @login_required
163 def add_game(request):
164 if request.method == 'POST':
165 form = forms.AddGameForm(request.POST)
166 if form.is_valid():
167 game = form.save(commit=False)
168 game.generator = 'Main'
169 game.owner = request.user
170 game.save()
171 return redirect('aimmo/program', id=game.id)
172 else:
173 form = forms.AddGameForm()
174 return render(request, 'players/add_game.html', {'form': form})
175
[end of players/views.py]
[start of players/app_settings.py]
1 from django.conf import settings
2
3 #: URL function for locating the game server, takes one parameter `game`
4 GAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)
5
6 MAX_LEVEL = 1
7
[end of players/app_settings.py]
[start of example_project/example_project/settings.py]
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2015, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 """Django settings for example_project project."""
38 import subprocess
39
40 import os
41
42 ALLOWED_HOSTS = ['*']
43
44 DEBUG = True
45
46 DATABASES = {
47 'default': {
48 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql',
49 # 'sqlite3' or 'oracle'.
50 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'),
51 # Or path to database file if using sqlite3.
52 }
53 }
54
55 USE_I18N = True
56 USE_L10N = True
57
58 TIME_ZONE = 'Europe/London'
59 LANGUAGE_CODE = 'en-gb'
60 STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')
61 STATIC_URL = '/static/'
62 SECRET_KEY = 'not-a-secret'
63
64 ROOT_URLCONF = 'django_autoconfig.autourlconf'
65
66 WSGI_APPLICATION = 'example_project.wsgi.application'
67
68 INSTALLED_APPS = (
69 'django.contrib.admin',
70 'django.contrib.auth',
71 'django.contrib.contenttypes',
72 'players',
73 'django_forms_bootstrap',
74 )
75
76 LOGGING = {
77 'version': 1,
78 'disable_existing_loggers': False,
79 'filters': {
80 'require_debug_false': {
81 '()': 'django.utils.log.RequireDebugFalse'
82 }
83 },
84 'handlers': {
85 'console': {
86 'level': 'DEBUG',
87 'class': 'logging.StreamHandler'
88 },
89 },
90 'loggers': {
91 'views': {
92 'handlers': ['console'],
93 'level': 'DEBUG'
94 },
95 }
96 }
97
98 LOGIN_URL = '/players/accounts/login/'
99
100 LOGIN_REDIRECT_URL = '/players/'
101
102 MIDDLEWARE_CLASSES = [
103 'django.contrib.sessions.middleware.SessionMiddleware',
104 'django.middleware.locale.LocaleMiddleware',
105 'django.middleware.common.CommonMiddleware',
106 'django.middleware.csrf.CsrfViewMiddleware',
107 'django.contrib.auth.middleware.AuthenticationMiddleware',
108 'django.contrib.messages.middleware.MessageMiddleware',
109 ]
110
111
112 def get_url(game):
113 if os.environ.get('AIMMO_MODE', '') == 'minikube':
114 output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',
115 'game-%s' % game, '--url'])
116 return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game
117 else:
118 return 'http://localhost:%d' % (6001 + int(game) * 1000), ''
119
120
121 AIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url
122
123 try:
124 from example_project.local_settings import * # pylint: disable=E0611
125 except ImportError:
126 pass
127
128 from django_autoconfig import autoconfig
129 autoconfig.configure_settings(globals())
130
[end of example_project/example_project/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py
--- a/example_project/example_project/settings.py
+++ b/example_project/example_project/settings.py
@@ -109,16 +109,25 @@
]
-def get_url(game):
+def get_game_url_base_and_path(game):
if os.environ.get('AIMMO_MODE', '') == 'minikube':
output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',
'game-%s' % game, '--url'])
- return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game
+ return 'local.aimmo.codeforlife.education', '/game-%s' % game
else:
- return 'http://localhost:%d' % (6001 + int(game) * 1000), ''
+ return 'localhost', ''
-AIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url
+def get_game_port(game):
+ if os.environ.get('AIMMO_MODE', '') == 'minikube':
+ return 0
+
+ return 6001 + int(game) * 1000
+
+
+AIMMO_GAME_SERVER_URL_FUNCTION = get_game_url_base_and_path
+AIMMO_GAME_SERVER_PORT_FUNCTION = get_game_port
+AIMMO_GAME_SERVER_SSL_FLAG = False
try:
from example_project.local_settings import * # pylint: disable=E0611
diff --git a/players/app_settings.py b/players/app_settings.py
--- a/players/app_settings.py
+++ b/players/app_settings.py
@@ -1,6 +1,8 @@
from django.conf import settings
#: URL function for locating the game server, takes one parameter `game`
-GAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)
+GAME_SERVER_URL_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_URL_FUNCTION', None)
+GAME_SERVER_PORT_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_PORT_FUNCTION', None)
+GAME_SERVER_SSL_FLAG = getattr(settings, 'AIMMO_GAME_SERVER_SSL_FLAG', False)
MAX_LEVEL = 1
diff --git a/players/views.py b/players/views.py
--- a/players/views.py
+++ b/players/views.py
@@ -123,7 +123,9 @@
'active': game.is_active,
'static_data': game.static_data or '{}',
}
- context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)
+ context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_URL_FUNCTION(game.id)
+ context['game_url_port'] = app_settings.GAME_SERVER_PORT_FUNCTION(game.id)
+ context['game_ssl_flag'] = app_settings.GAME_SERVER_SSL_FLAG
context['game_id'] = game.id
return render(request, 'players/viewer.html', context)
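For illustration, a sketch of how a deployment might configure the three new settings introduced by this patch; only the setting names come from the diff above, while the host, path, and port arithmetic are placeholders.

```python
# Hypothetical local_settings.py-style configuration; values are placeholders.
def game_server_url(game_id):
    # Base host and URL path used by the viewer to reach a game instance.
    return 'localhost', '/game-%s' % game_id


def game_server_port(game_id):
    # Placeholder port scheme for locally run games.
    return 6001 + int(game_id) * 1000


AIMMO_GAME_SERVER_URL_FUNCTION = game_server_url
AIMMO_GAME_SERVER_PORT_FUNCTION = game_server_port
AIMMO_GAME_SERVER_SSL_FLAG = False
```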
|
{"golden_diff": "diff --git a/example_project/example_project/settings.py b/example_project/example_project/settings.py\n--- a/example_project/example_project/settings.py\n+++ b/example_project/example_project/settings.py\n@@ -109,16 +109,25 @@\n ]\n \n \n-def get_url(game):\n+def get_game_url_base_and_path(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',\n 'game-%s' % game, '--url'])\n- return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game\n+ return 'local.aimmo.codeforlife.education', '/game-%s' % game\n else:\n- return 'http://localhost:%d' % (6001 + int(game) * 1000), ''\n+ return 'localhost', ''\n \n \n-AIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url\n+def get_game_port(game):\n+ if os.environ.get('AIMMO_MODE', '') == 'minikube':\n+ return 0\n+\n+ return 6001 + int(game) * 1000\n+\n+\n+AIMMO_GAME_SERVER_URL_FUNCTION = get_game_url_base_and_path\n+AIMMO_GAME_SERVER_PORT_FUNCTION = get_game_port\n+AIMMO_GAME_SERVER_SSL_FLAG = False\n \n try:\n from example_project.local_settings import * # pylint: disable=E0611\ndiff --git a/players/app_settings.py b/players/app_settings.py\n--- a/players/app_settings.py\n+++ b/players/app_settings.py\n@@ -1,6 +1,8 @@\n from django.conf import settings\n \n #: URL function for locating the game server, takes one parameter `game`\n-GAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)\n+GAME_SERVER_URL_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_URL_FUNCTION', None)\n+GAME_SERVER_PORT_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_PORT_FUNCTION', None)\n+GAME_SERVER_SSL_FLAG = getattr(settings, 'AIMMO_GAME_SERVER_SSL_FLAG', False)\n \n MAX_LEVEL = 1\ndiff --git a/players/views.py b/players/views.py\n--- a/players/views.py\n+++ b/players/views.py\n@@ -123,7 +123,9 @@\n 'active': game.is_active,\n 'static_data': game.static_data or '{}',\n }\n- context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)\n+ context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_URL_FUNCTION(game.id)\n+ context['game_url_port'] = app_settings.GAME_SERVER_PORT_FUNCTION(game.id)\n+ context['game_ssl_flag'] = app_settings.GAME_SERVER_SSL_FLAG\n context['game_id'] = game.id\n return render(request, 'players/viewer.html', context)\n", "issue": "Fix aimmo view tests \nThe new PR on branch `fix_staging` is breaking some tests.\n", "before_files": [{"content": "import cPickle as pickle\nimport logging\nimport os\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ValidationError\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, Http404\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.generic import TemplateView\n\nfrom models import Avatar, Game, LevelAttempt\nfrom players import forms\nfrom . 
import app_settings\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _post_code_success_response(message):\n return _create_response(\"SUCCESS\", message)\n\n\ndef _create_response(status, message):\n response = {\n \"status\": status,\n \"message\": message\n }\n return JsonResponse(response)\n\n\n@login_required\ndef code(request, id):\n game = get_object_or_404(Game, id=id)\n if not game.can_user_play(request.user):\n raise Http404\n try:\n avatar = game.avatar_set.get(owner=request.user)\n except Avatar.DoesNotExist:\n initial_code_file_name = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'avatar_examples/dumb_avatar.py',\n )\n with open(initial_code_file_name) as initial_code_file:\n initial_code = initial_code_file.read()\n avatar = Avatar.objects.create(owner=request.user, code=initial_code,\n game_id=id)\n if request.method == 'POST':\n avatar.code = request.POST['code']\n avatar.save()\n return _post_code_success_response('Your code was saved!<br><br><a href=\"%s\">Watch</a>' % reverse('aimmo/watch', kwargs={'id': game.id}))\n else:\n return HttpResponse(avatar.code)\n\n\ndef list_games(request):\n response = {\n game.pk:\n {\n 'name': game.name,\n 'settings': pickle.dumps(game.settings_as_dict()),\n } for game in Game.objects.exclude_inactive()\n }\n return JsonResponse(response)\n\n\ndef get_game(request, id):\n game = get_object_or_404(Game, id=id)\n response = {\n 'main': {\n 'parameters': [],\n 'main_avatar': None,\n 'users': [],\n }\n }\n for avatar in game.avatar_set.all():\n if avatar.owner_id == game.main_user_id:\n response['main']['main_avatar'] = avatar.owner_id\n response['main']['users'].append({\n 'id': avatar.owner_id,\n 'code': avatar.code,\n })\n return JsonResponse(response)\n\n\n@csrf_exempt\n@require_http_methods(['POST'])\ndef mark_game_complete(request, id):\n game = get_object_or_404(Game, id=id)\n game.completed = True\n game.static_data = request.body\n game.save()\n return HttpResponse('Done!')\n\n\nclass ProgramView(TemplateView):\n template_name = 'players/program.html'\n\n def get_context_data(self, **kwargs):\n context = super(ProgramView, self).get_context_data(**kwargs)\n game = get_object_or_404(Game, id=self.kwargs['id'])\n if not game.can_user_play(self.request.user):\n raise Http404\n context['game_id'] = int(self.kwargs['id'])\n return context\n\n\ndef program_level(request, num):\n try:\n game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)\n except Game.DoesNotExist:\n LOGGER.debug('Adding level')\n game = _add_and_return_level(num, request.user)\n LOGGER.debug('Programming game with id %s', game.id)\n return render(request, 'players/program.html', {'game_id': game.id})\n\n\ndef _render_game(request, game):\n context = {\n 'current_user_player_key': request.user.pk,\n 'active': game.is_active,\n 'static_data': game.static_data or '{}',\n }\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION(game.id)\n context['game_id'] = game.id\n return render(request, 'players/viewer.html', context)\n\n\ndef watch_game(request, id):\n game = get_object_or_404(Game, id=id)\n if not game.can_user_play(request.user):\n raise Http404\n return _render_game(request, game)\n\n\ndef watch_level(request, num):\n try:\n game = Game.objects.get(levelattempt__user=request.user, levelattempt__level_number=num)\n except Game.DoesNotExist:\n LOGGER.debug('Adding level')\n game = _add_and_return_level(num, request.user)\n LOGGER.debug('Displaying game with id %s', game.id)\n 
return _render_game(request, game)\n\n\ndef _add_and_return_level(num, user):\n game = Game(generator='Level'+num, name='Level '+num, public=False, main_user=user)\n try:\n game.save()\n except ValidationError as e:\n LOGGER.warn(e)\n raise Http404\n game.can_play = [user]\n game.save()\n level_attempt = LevelAttempt(game=game, user=user, level_number=num)\n level_attempt.save()\n return game\n\n\n@login_required\ndef add_game(request):\n if request.method == 'POST':\n form = forms.AddGameForm(request.POST)\n if form.is_valid():\n game = form.save(commit=False)\n game.generator = 'Main'\n game.owner = request.user\n game.save()\n return redirect('aimmo/program', id=game.id)\n else:\n form = forms.AddGameForm()\n return render(request, 'players/add_game.html', {'form': form})\n", "path": "players/views.py"}, {"content": "from django.conf import settings\n\n#: URL function for locating the game server, takes one parameter `game`\nGAME_SERVER_LOCATION_FUNCTION = getattr(settings, 'AIMMO_GAME_SERVER_LOCATION_FUNCTION', None)\n\nMAX_LEVEL = 1\n", "path": "players/app_settings.py"}, {"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2015, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\n\"\"\"Django settings for example_project project.\"\"\"\nimport subprocess\n\nimport os\n\nALLOWED_HOSTS = ['*']\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql',\n # 'sqlite3' or 'oracle'.\n 'NAME': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'db.sqlite3'),\n # Or path to database file if using sqlite3.\n }\n}\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\nSTATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')\nSTATIC_URL = '/static/'\nSECRET_KEY = 'not-a-secret'\n\nROOT_URLCONF = 'django_autoconfig.autourlconf'\n\nWSGI_APPLICATION = 'example_project.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'players',\n 'django_forms_bootstrap',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'views': {\n 'handlers': ['console'],\n 'level': 'DEBUG'\n },\n }\n}\n\nLOGIN_URL = '/players/accounts/login/'\n\nLOGIN_REDIRECT_URL = '/players/'\n\nMIDDLEWARE_CLASSES = [\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n]\n\n\ndef get_url(game):\n if os.environ.get('AIMMO_MODE', '') == 'minikube':\n output = subprocess.check_output([os.environ['MINIKUBE_PATH'], 'service',\n 'game-%s' % game, '--url'])\n return 'http://dev.aimmo.codeforlife.education', '/game-%s' % game\n else:\n return 'http://localhost:%d' % (6001 + int(game) * 1000), ''\n\n\nAIMMO_GAME_SERVER_LOCATION_FUNCTION = get_url\n\ntry:\n from example_project.local_settings import * # pylint: disable=E0611\nexcept ImportError:\n pass\n\nfrom django_autoconfig import autoconfig\nautoconfig.configure_settings(globals())\n", "path": "example_project/example_project/settings.py"}]}
| 3,572 | 643 |
gh_patches_debug_36750 | rasdani/github-patches | git_diff | feast-dev__feast-3761 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add materialize and materialize-incremental rest endpoints
**Is your feature request related to a problem? Please describe.**
In multi-cluster environments, when managing all cronjobs with cron orchestrator tools, we face restricted network access to some resources. We use the REST interface for the most important CLI commands, such as materialize and materialize-incremental.
**Describe the solution you'd like**
Add two endpoints to the Python server.
**Additional context**
We could also implement this in the Go server.
Add materialize and materialize-incremental rest endpoints
**Is your feature request related to a problem? Please describe.**
In multi-cluster environments, when managing all cronjobs with cron orchestrator tools, we face restricted network access to some resources. We use the REST interface for the most important CLI commands, such as materialize and materialize-incremental.
**Describe the solution you'd like**
Add two endpoints to the Python server.
**Additional context**
We could also implement this in the Go server.
</issue>
<code>
[start of sdk/python/feast/feature_server.py]
1 import json
2 import traceback
3 import warnings
4
5 import gunicorn.app.base
6 import pandas as pd
7 from fastapi import FastAPI, HTTPException, Request, Response, status
8 from fastapi.logger import logger
9 from fastapi.params import Depends
10 from google.protobuf.json_format import MessageToDict, Parse
11 from pydantic import BaseModel
12
13 import feast
14 from feast import proto_json
15 from feast.data_source import PushMode
16 from feast.errors import PushSourceNotFoundException
17 from feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest
18
19
20 # TODO: deprecate this in favor of push features
21 class WriteToFeatureStoreRequest(BaseModel):
22 feature_view_name: str
23 df: dict
24 allow_registry_cache: bool = True
25
26
27 class PushFeaturesRequest(BaseModel):
28 push_source_name: str
29 df: dict
30 allow_registry_cache: bool = True
31 to: str = "online"
32
33
34 def get_app(store: "feast.FeatureStore"):
35 proto_json.patch()
36
37 app = FastAPI()
38
39 async def get_body(request: Request):
40 return await request.body()
41
42 @app.post("/get-online-features")
43 def get_online_features(body=Depends(get_body)):
44 try:
45 # Validate and parse the request data into GetOnlineFeaturesRequest Protobuf object
46 request_proto = GetOnlineFeaturesRequest()
47 Parse(body, request_proto)
48
49 # Initialize parameters for FeatureStore.get_online_features(...) call
50 if request_proto.HasField("feature_service"):
51 features = store.get_feature_service(
52 request_proto.feature_service, allow_cache=True
53 )
54 else:
55 features = list(request_proto.features.val)
56
57 full_feature_names = request_proto.full_feature_names
58
59 batch_sizes = [len(v.val) for v in request_proto.entities.values()]
60 num_entities = batch_sizes[0]
61 if any(batch_size != num_entities for batch_size in batch_sizes):
62 raise HTTPException(status_code=500, detail="Uneven number of columns")
63
64 response_proto = store._get_online_features(
65 features=features,
66 entity_values=request_proto.entities,
67 full_feature_names=full_feature_names,
68 native_entity_values=False,
69 ).proto
70
71 # Convert the Protobuf object to JSON and return it
72 return MessageToDict( # type: ignore
73 response_proto, preserving_proto_field_name=True, float_precision=18
74 )
75 except Exception as e:
76 # Print the original exception on the server side
77 logger.exception(traceback.format_exc())
78 # Raise HTTPException to return the error message to the client
79 raise HTTPException(status_code=500, detail=str(e))
80
81 @app.post("/push")
82 def push(body=Depends(get_body)):
83 try:
84 request = PushFeaturesRequest(**json.loads(body))
85 df = pd.DataFrame(request.df)
86 if request.to == "offline":
87 to = PushMode.OFFLINE
88 elif request.to == "online":
89 to = PushMode.ONLINE
90 elif request.to == "online_and_offline":
91 to = PushMode.ONLINE_AND_OFFLINE
92 else:
93 raise ValueError(
94 f"{request.to} is not a supported push format. Please specify one of these ['online', 'offline', 'online_and_offline']."
95 )
96 store.push(
97 push_source_name=request.push_source_name,
98 df=df,
99 allow_registry_cache=request.allow_registry_cache,
100 to=to,
101 )
102 except PushSourceNotFoundException as e:
103 # Print the original exception on the server side
104 logger.exception(traceback.format_exc())
105 # Raise HTTPException to return the error message to the client
106 raise HTTPException(status_code=422, detail=str(e))
107 except Exception as e:
108 # Print the original exception on the server side
109 logger.exception(traceback.format_exc())
110 # Raise HTTPException to return the error message to the client
111 raise HTTPException(status_code=500, detail=str(e))
112
113 @app.post("/write-to-online-store")
114 def write_to_online_store(body=Depends(get_body)):
115 warnings.warn(
116 "write_to_online_store is deprecated. Please consider using /push instead",
117 RuntimeWarning,
118 )
119 try:
120 request = WriteToFeatureStoreRequest(**json.loads(body))
121 df = pd.DataFrame(request.df)
122 store.write_to_online_store(
123 feature_view_name=request.feature_view_name,
124 df=df,
125 allow_registry_cache=request.allow_registry_cache,
126 )
127 except Exception as e:
128 # Print the original exception on the server side
129 logger.exception(traceback.format_exc())
130 # Raise HTTPException to return the error message to the client
131 raise HTTPException(status_code=500, detail=str(e))
132
133 @app.get("/health")
134 def health():
135 return Response(status_code=status.HTTP_200_OK)
136
137 return app
138
139
140 class FeastServeApplication(gunicorn.app.base.BaseApplication):
141 def __init__(self, store: "feast.FeatureStore", **options):
142 self._app = get_app(store=store)
143 self._options = options
144 super().__init__()
145
146 def load_config(self):
147 for key, value in self._options.items():
148 if key.lower() in self.cfg.settings and value is not None:
149 self.cfg.set(key.lower(), value)
150
151 self.cfg.set("worker_class", "uvicorn.workers.UvicornWorker")
152
153 def load(self):
154 return self._app
155
156
157 def start_server(
158 store: "feast.FeatureStore",
159 host: str,
160 port: int,
161 no_access_log: bool,
162 workers: int,
163 keep_alive_timeout: int,
164 ):
165 FeastServeApplication(
166 store=store,
167 bind=f"{host}:{port}",
168 accesslog=None if no_access_log else "-",
169 workers=workers,
170 keepalive=keep_alive_timeout,
171 ).run()
172
[end of sdk/python/feast/feature_server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sdk/python/feast/feature_server.py b/sdk/python/feast/feature_server.py
--- a/sdk/python/feast/feature_server.py
+++ b/sdk/python/feast/feature_server.py
@@ -1,9 +1,11 @@
import json
import traceback
import warnings
+from typing import List, Optional
import gunicorn.app.base
import pandas as pd
+from dateutil import parser
from fastapi import FastAPI, HTTPException, Request, Response, status
from fastapi.logger import logger
from fastapi.params import Depends
@@ -11,7 +13,7 @@
from pydantic import BaseModel
import feast
-from feast import proto_json
+from feast import proto_json, utils
from feast.data_source import PushMode
from feast.errors import PushSourceNotFoundException
from feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest
@@ -31,6 +33,17 @@
to: str = "online"
+class MaterializeRequest(BaseModel):
+ start_ts: str
+ end_ts: str
+ feature_views: Optional[List[str]] = None
+
+
+class MaterializeIncrementalRequest(BaseModel):
+ end_ts: str
+ feature_views: Optional[List[str]] = None
+
+
def get_app(store: "feast.FeatureStore"):
proto_json.patch()
@@ -134,6 +147,34 @@
def health():
return Response(status_code=status.HTTP_200_OK)
+ @app.post("/materialize")
+ def materialize(body=Depends(get_body)):
+ try:
+ request = MaterializeRequest(**json.loads(body))
+ store.materialize(
+ utils.make_tzaware(parser.parse(request.start_ts)),
+ utils.make_tzaware(parser.parse(request.end_ts)),
+ request.feature_views,
+ )
+ except Exception as e:
+ # Print the original exception on the server side
+ logger.exception(traceback.format_exc())
+ # Raise HTTPException to return the error message to the client
+ raise HTTPException(status_code=500, detail=str(e))
+
+ @app.post("/materialize-incremental")
+ def materialize_incremental(body=Depends(get_body)):
+ try:
+ request = MaterializeIncrementalRequest(**json.loads(body))
+ store.materialize_incremental(
+ utils.make_tzaware(parser.parse(request.end_ts)), request.feature_views
+ )
+ except Exception as e:
+ # Print the original exception on the server side
+ logger.exception(traceback.format_exc())
+ # Raise HTTPException to return the error message to the client
+ raise HTTPException(status_code=500, detail=str(e))
+
return app
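For context, here is a minimal sketch of how a client could exercise the two endpoints added by the patch above once a feature server is running. The host, port, timestamps, and feature view name are assumptions for illustration only, not part of the patch:

```python
# Hypothetical client calls against the new endpoints (host/port, timestamps,
# and the feature view name are made up for illustration).
import requests

base = "http://localhost:6566"  # assumed feature server address

# Materialize a fixed window for selected feature views
requests.post(
    f"{base}/materialize",
    json={
        "start_ts": "2023-01-01T00:00:00",
        "end_ts": "2023-01-02T00:00:00",
        "feature_views": ["driver_hourly_stats"],
    },
)

# Incrementally materialize everything up to a timestamp
requests.post(f"{base}/materialize-incremental", json={"end_ts": "2023-01-02T00:00:00"})
```

Both handlers in the diff convert the timestamps with `utils.make_tzaware(parser.parse(...))`, so any timestamp string that `dateutil` can parse should be accepted.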
|
{"golden_diff": "diff --git a/sdk/python/feast/feature_server.py b/sdk/python/feast/feature_server.py\n--- a/sdk/python/feast/feature_server.py\n+++ b/sdk/python/feast/feature_server.py\n@@ -1,9 +1,11 @@\n import json\n import traceback\n import warnings\n+from typing import List, Optional\n \n import gunicorn.app.base\n import pandas as pd\n+from dateutil import parser\n from fastapi import FastAPI, HTTPException, Request, Response, status\n from fastapi.logger import logger\n from fastapi.params import Depends\n@@ -11,7 +13,7 @@\n from pydantic import BaseModel\n \n import feast\n-from feast import proto_json\n+from feast import proto_json, utils\n from feast.data_source import PushMode\n from feast.errors import PushSourceNotFoundException\n from feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest\n@@ -31,6 +33,17 @@\n to: str = \"online\"\n \n \n+class MaterializeRequest(BaseModel):\n+ start_ts: str\n+ end_ts: str\n+ feature_views: Optional[List[str]] = None\n+\n+\n+class MaterializeIncrementalRequest(BaseModel):\n+ end_ts: str\n+ feature_views: Optional[List[str]] = None\n+\n+\n def get_app(store: \"feast.FeatureStore\"):\n proto_json.patch()\n \n@@ -134,6 +147,34 @@\n def health():\n return Response(status_code=status.HTTP_200_OK)\n \n+ @app.post(\"/materialize\")\n+ def materialize(body=Depends(get_body)):\n+ try:\n+ request = MaterializeRequest(**json.loads(body))\n+ store.materialize(\n+ utils.make_tzaware(parser.parse(request.start_ts)),\n+ utils.make_tzaware(parser.parse(request.end_ts)),\n+ request.feature_views,\n+ )\n+ except Exception as e:\n+ # Print the original exception on the server side\n+ logger.exception(traceback.format_exc())\n+ # Raise HTTPException to return the error message to the client\n+ raise HTTPException(status_code=500, detail=str(e))\n+\n+ @app.post(\"/materialize-incremental\")\n+ def materialize_incremental(body=Depends(get_body)):\n+ try:\n+ request = MaterializeIncrementalRequest(**json.loads(body))\n+ store.materialize_incremental(\n+ utils.make_tzaware(parser.parse(request.end_ts)), request.feature_views\n+ )\n+ except Exception as e:\n+ # Print the original exception on the server side\n+ logger.exception(traceback.format_exc())\n+ # Raise HTTPException to return the error message to the client\n+ raise HTTPException(status_code=500, detail=str(e))\n+\n return app\n", "issue": "Add materialize and materialize-incremental rest endpoints\n**Is your feature request related to a problem? Please describe.**\r\nIn multi clustered environments when managing all cronjobs with cron orchestrator tools facing restricted network access to some resources. We use the Rest interface for most important CLI commands as materialize and materialize-incremental.\r\n\r\n**Describe the solution you'd like**\r\nadding two endpoints to Python server\r\n\r\n**Additional context**\r\nAlso we can implement to go server too\r\n\nAdd materialize and materialize-incremental rest endpoints\n**Is your feature request related to a problem? Please describe.**\r\nIn multi clustered environments when managing all cronjobs with cron orchestrator tools facing restricted network access to some resources. 
We use the Rest interface for most important CLI commands as materialize and materialize-incremental.\r\n\r\n**Describe the solution you'd like**\r\nadding two endpoints to Python server\r\n\r\n**Additional context**\r\nAlso we can implement to go server too\r\n\n", "before_files": [{"content": "import json\nimport traceback\nimport warnings\n\nimport gunicorn.app.base\nimport pandas as pd\nfrom fastapi import FastAPI, HTTPException, Request, Response, status\nfrom fastapi.logger import logger\nfrom fastapi.params import Depends\nfrom google.protobuf.json_format import MessageToDict, Parse\nfrom pydantic import BaseModel\n\nimport feast\nfrom feast import proto_json\nfrom feast.data_source import PushMode\nfrom feast.errors import PushSourceNotFoundException\nfrom feast.protos.feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest\n\n\n# TODO: deprecate this in favor of push features\nclass WriteToFeatureStoreRequest(BaseModel):\n feature_view_name: str\n df: dict\n allow_registry_cache: bool = True\n\n\nclass PushFeaturesRequest(BaseModel):\n push_source_name: str\n df: dict\n allow_registry_cache: bool = True\n to: str = \"online\"\n\n\ndef get_app(store: \"feast.FeatureStore\"):\n proto_json.patch()\n\n app = FastAPI()\n\n async def get_body(request: Request):\n return await request.body()\n\n @app.post(\"/get-online-features\")\n def get_online_features(body=Depends(get_body)):\n try:\n # Validate and parse the request data into GetOnlineFeaturesRequest Protobuf object\n request_proto = GetOnlineFeaturesRequest()\n Parse(body, request_proto)\n\n # Initialize parameters for FeatureStore.get_online_features(...) call\n if request_proto.HasField(\"feature_service\"):\n features = store.get_feature_service(\n request_proto.feature_service, allow_cache=True\n )\n else:\n features = list(request_proto.features.val)\n\n full_feature_names = request_proto.full_feature_names\n\n batch_sizes = [len(v.val) for v in request_proto.entities.values()]\n num_entities = batch_sizes[0]\n if any(batch_size != num_entities for batch_size in batch_sizes):\n raise HTTPException(status_code=500, detail=\"Uneven number of columns\")\n\n response_proto = store._get_online_features(\n features=features,\n entity_values=request_proto.entities,\n full_feature_names=full_feature_names,\n native_entity_values=False,\n ).proto\n\n # Convert the Protobuf object to JSON and return it\n return MessageToDict( # type: ignore\n response_proto, preserving_proto_field_name=True, float_precision=18\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.post(\"/push\")\n def push(body=Depends(get_body)):\n try:\n request = PushFeaturesRequest(**json.loads(body))\n df = pd.DataFrame(request.df)\n if request.to == \"offline\":\n to = PushMode.OFFLINE\n elif request.to == \"online\":\n to = PushMode.ONLINE\n elif request.to == \"online_and_offline\":\n to = PushMode.ONLINE_AND_OFFLINE\n else:\n raise ValueError(\n f\"{request.to} is not a supported push format. 
Please specify one of these ['online', 'offline', 'online_and_offline'].\"\n )\n store.push(\n push_source_name=request.push_source_name,\n df=df,\n allow_registry_cache=request.allow_registry_cache,\n to=to,\n )\n except PushSourceNotFoundException as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=422, detail=str(e))\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.post(\"/write-to-online-store\")\n def write_to_online_store(body=Depends(get_body)):\n warnings.warn(\n \"write_to_online_store is deprecated. Please consider using /push instead\",\n RuntimeWarning,\n )\n try:\n request = WriteToFeatureStoreRequest(**json.loads(body))\n df = pd.DataFrame(request.df)\n store.write_to_online_store(\n feature_view_name=request.feature_view_name,\n df=df,\n allow_registry_cache=request.allow_registry_cache,\n )\n except Exception as e:\n # Print the original exception on the server side\n logger.exception(traceback.format_exc())\n # Raise HTTPException to return the error message to the client\n raise HTTPException(status_code=500, detail=str(e))\n\n @app.get(\"/health\")\n def health():\n return Response(status_code=status.HTTP_200_OK)\n\n return app\n\n\nclass FeastServeApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, store: \"feast.FeatureStore\", **options):\n self._app = get_app(store=store)\n self._options = options\n super().__init__()\n\n def load_config(self):\n for key, value in self._options.items():\n if key.lower() in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n self.cfg.set(\"worker_class\", \"uvicorn.workers.UvicornWorker\")\n\n def load(self):\n return self._app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n no_access_log: bool,\n workers: int,\n keep_alive_timeout: int,\n):\n FeastServeApplication(\n store=store,\n bind=f\"{host}:{port}\",\n accesslog=None if no_access_log else \"-\",\n workers=workers,\n keepalive=keep_alive_timeout,\n ).run()\n", "path": "sdk/python/feast/feature_server.py"}]}
| 2,396 | 609 |
gh_patches_debug_18488 | rasdani/github-patches | git_diff | facebookresearch__hydra-1054 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] [Hydra-Submitit-Launcher] Add support for passing `additional_parameters` as supported by Submitit
# 🚀 Feature Request
When calling [Submitit](https://github.com/facebookincubator/submitit) directly (not the plugin), one can pass an optional dictionary argument called [`additional_parameters`](https://github.com/facebookincubator/submitit/blob/e6fc6b2532a2ea480f08747816f14c53f526ef04/submitit/slurm/slurm.py#L432) to add parameters which are not currently available in submitit.
This argument cannot be passed when using the submitit plugin. Example error:
```
Key 'additional_parameters' not in 'SlurmQueueConf'
full_key: hydra.launcher.additional_parameters
reference_type=Optional[SlurmQueueConf]
object_type=SlurmQueueConf
```
We should add support for passing this `additional_parameters` dict via the plugin as well.
## Motivation
Same as above.
## Pitch
Add support for the dict in [SlurmQueueConf](https://github.com/facebookresearch/hydra/blob/master/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py#L31) and pass it to submitit.AutoExecutor [here](https://github.com/facebookresearch/hydra/blob/dfbebc34020ad3d4bf1ab061565408740e62eb22/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/submitit_launcher.py#L105)
**Are you willing to open a pull request?** (See [CONTRIBUTING](../../CONTRIBUTING.md)): Yes
## Additional context
Since this change is quite small, is there any reason it wasn't done in the first iteration of the plugin?
</issue>
<code>
[start of plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass
3 from typing import Optional
4
5 from hydra.core.config_store import ConfigStore
6
7
8 @dataclass
9 class BaseQueueConf:
10 """Configuration shared by all executors"""
11
12 submitit_folder: str = "${hydra.sweep.dir}/.submitit/%j"
13
14 # maximum time for the job in minutes
15 timeout_min: int = 60
16 # number of cpus to use for each task
17 cpus_per_task: int = 1
18 # number of gpus to use on each node
19 gpus_per_node: int = 0
20 # number of tasks to spawn on each node
21 tasks_per_node: int = 1
22 # memory to reserve for the job on each node (in GB)
23 mem_gb: int = 4
24 # number of nodes to use for the job
25 nodes: int = 1
26 # name of the job
27 name: str = "${hydra.job.name}"
28
29
30 @dataclass
31 class SlurmQueueConf(BaseQueueConf):
32 """Slurm configuration overrides and specific parameters"""
33
34 _target_: str = (
35 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher"
36 )
37
38 # Params are used to configure sbatch, for more info check:
39 # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py
40
41 # Following parameters are slurm specific
42 # More information: https://slurm.schedmd.com/sbatch.html
43 #
44 # slurm partition to use on the cluster
45 partition: Optional[str] = None
46 comment: Optional[str] = None
47 constraint: Optional[str] = None
48 exclude: Optional[str] = None
49
50 # Following parameters are submitit specifics
51 #
52 # USR1 signal delay before timeout
53 signal_delay_s: int = 120
54 # Maximum number of retries on job timeout.
55 # Change this only after you confirmed your code can handle re-submission
56 # by properly resuming from the latest stored checkpoint.
57 # check the following for more info on slurm_max_num_timeout
58 # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md
59 max_num_timeout: int = 0
60
61
62 @dataclass
63 class LocalQueueConf(BaseQueueConf):
64 _target_: str = (
65 "hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher"
66 )
67
68
69 # finally, register two different choices:
70 ConfigStore.instance().store(
71 group="hydra/launcher",
72 name="submitit_local",
73 node=LocalQueueConf(),
74 provider="submitit_launcher",
75 )
76
77
78 ConfigStore.instance().store(
79 group="hydra/launcher",
80 name="submitit_slurm",
81 node=SlurmQueueConf(),
82 provider="submitit_launcher",
83 )
84
[end of plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py
@@ -1,6 +1,6 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-from dataclasses import dataclass
-from typing import Optional
+from dataclasses import dataclass, field
+from typing import Any, Dict, Optional
from hydra.core.config_store import ConfigStore
@@ -57,6 +57,9 @@
# check the following for more info on slurm_max_num_timeout
# https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md
max_num_timeout: int = 0
+ # Useful to add parameters which are not currently available in the plugin.
+ # Eg: {"mail-user": "[email protected]", "mail-type": "BEGIN"}
+ additional_parameters: Dict[str, Any] = field(default_factory=dict)
@dataclass
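A rough sketch of what the launcher can then do with the new field, based on the public submitit API; the `slurm_additional_parameters` keyword and the example values are assumptions for illustration, not code from the repository:

```python
# Sketch only: forwarding the new config field to a submitit executor.
import submitit

extra = {"mail-user": "user@example.com", "mail-type": "BEGIN"}  # example values

executor = submitit.AutoExecutor(folder="submitit_logs")
executor.update_parameters(
    timeout_min=60,
    slurm_partition="dev",
    slurm_additional_parameters=extra,  # emitted as extra #SBATCH lines by submitit
)
job = executor.submit(print, "hello from slurm")
```

From the user's side, the equivalent Hydra override would set `hydra.launcher.additional_parameters` in the launcher config, mirroring the example in the docstring comment above.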
|
{"golden_diff": "diff --git a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n--- a/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n+++ b/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py\n@@ -1,6 +1,6 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n-from dataclasses import dataclass\n-from typing import Optional\n+from dataclasses import dataclass, field\n+from typing import Any, Dict, Optional\n \n from hydra.core.config_store import ConfigStore\n \n@@ -57,6 +57,9 @@\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n+ # Useful to add parameters which are not currently available in the plugin.\n+ # Eg: {\"mail-user\": \"[email protected]\", \"mail-type\": \"BEGIN\"}\n+ additional_parameters: Dict[str, Any] = field(default_factory=dict)\n \n \n @dataclass\n", "issue": "[Feature Request] [Hydra-Submitit-Launcher] Add support for passing `additional_parameters` as supported by Submititi\n# \ud83d\ude80 Feature Request\r\n\r\nWhen calling [Submitit](https://github.com/facebookincubator/submitit) directly (not the plugin), one can pass an optional dictionary argument called [`additional_parameters`](https://github.com/facebookincubator/submitit/blob/e6fc6b2532a2ea480f08747816f14c53f526ef04/submitit/slurm/slurm.py#L432) to add parameters which are not currently available in submitit.\r\n\r\nThis argument can not be passed when using the submitit-plugin. Example error:\r\n\r\n```\r\nKey 'additional_parameters' not in 'SlurmQueueConf'\r\n full_key: hydra.launcher.additional_parameters\r\n reference_type=Optional[SlurmQueueConf]\r\n object_type=SlurmQueueConf\r\n```\r\n\r\nWe should add support for passing this `additional_parameters` dict via the plugin aswell.\r\n\r\n## Motivation\r\n\r\nSame as above.\r\n\r\n## Pitch\r\n\r\nAdd support for the dict in [SlurmQueueConf](https://github.com/facebookresearch/hydra/blob/master/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py#L31) and pass to submitit.AutoExecutor [here](https://github.com/facebookresearch/hydra/blob/dfbebc34020ad3d4bf1ab061565408740e62eb22/plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/submitit_launcher.py#L105)\r\n\r\n**Are you willing to open a pull request?** (See [CONTRIBUTING](../../CONTRIBUTING.md)): Yes\r\n\r\n## Additional context\r\n\r\nSince this change is quite small, is there any reason it wasnt done in the first iteration of the plugin?\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\n\n\n@dataclass\nclass BaseQueueConf:\n \"\"\"Configuration shared by all executors\"\"\"\n\n submitit_folder: str = \"${hydra.sweep.dir}/.submitit/%j\"\n\n # maximum time for the job in minutes\n timeout_min: int = 60\n # number of cpus to use for each task\n cpus_per_task: int = 1\n # number of gpus to use on each node\n gpus_per_node: int = 0\n # number of tasks to spawn on each node\n tasks_per_node: int = 1\n # memory to reserve for the job on each node (in GB)\n mem_gb: int = 4\n # number of nodes to use for the job\n nodes: int = 1\n # name of the job\n name: str = \"${hydra.job.name}\"\n\n\n@dataclass\nclass SlurmQueueConf(BaseQueueConf):\n \"\"\"Slurm configuration overrides and specific parameters\"\"\"\n\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.SlurmLauncher\"\n )\n\n # Params are used to configure sbatch, for more info check:\n # https://github.com/facebookincubator/submitit/blob/master/submitit/slurm/slurm.py\n\n # Following parameters are slurm specific\n # More information: https://slurm.schedmd.com/sbatch.html\n #\n # slurm partition to use on the cluster\n partition: Optional[str] = None\n comment: Optional[str] = None\n constraint: Optional[str] = None\n exclude: Optional[str] = None\n\n # Following parameters are submitit specifics\n #\n # USR1 signal delay before timeout\n signal_delay_s: int = 120\n # Maximum number of retries on job timeout.\n # Change this only after you confirmed your code can handle re-submission\n # by properly resuming from the latest stored checkpoint.\n # check the following for more info on slurm_max_num_timeout\n # https://github.com/facebookincubator/submitit/blob/master/docs/checkpointing.md\n max_num_timeout: int = 0\n\n\n@dataclass\nclass LocalQueueConf(BaseQueueConf):\n _target_: str = (\n \"hydra_plugins.hydra_submitit_launcher.submitit_launcher.LocalLauncher\"\n )\n\n\n# finally, register two different choices:\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_local\",\n node=LocalQueueConf(),\n provider=\"submitit_launcher\",\n)\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\",\n name=\"submitit_slurm\",\n node=SlurmQueueConf(),\n provider=\"submitit_launcher\",\n)\n", "path": "plugins/hydra_submitit_launcher/hydra_plugins/hydra_submitit_launcher/config.py"}]}
| 1,787 | 276 |
gh_patches_debug_15154 | rasdani/github-patches | git_diff | mlflow__mlflow-5926 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Two models one code name
### Willingness to contribute
Yes. I can contribute a fix for this bug independently.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: custom code
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: 20.04
- **MLflow installed from (source or binary)**: source
- **MLflow version (run ``mlflow --version``)**: 1.23.1
- **Python version**: 3.9
- **yarn version, if running the dev UI**: N/A
### Describe the problem
When two MLflow models package code under the same module name, for example a preprocessing module named `prep_features` packaged with two different MLflow models, loading the first model and then the second causes Python to load the `prep_features` module only once. This leads to problems if the second model's `prep_features` differs from the first model's.
### Tracking information
_No response_
### Code to reproduce issue
prep_features.py (change 1 to 2 when creating custom_model2 with the 2m1cn.py script):
```python
def preprocess_features():
return 1 # 2 for custom_model2
```
2m1cn.py (use this file to create custom_model1 and custom_model2):
```python
import mlflow.pyfunc
class CM(mlflow.pyfunc.PythonModel):
def __init__(self):
pass
def predict(self, context, model_input):
from prep_features import preprocess_features
return preprocess_features()
# Construct and save the model
model_path = "custom_model1" # "custom_model2"
code_path = ["prep_features.py"]
custom_model = CM()
mlflow.pyfunc.save_model(path=model_path, python_model=custom_model, code_path=code_path)
```
test.py (now use this file to test the created models):
```python
# Evaluate the models
import mlflow.pyfunc
import pandas as pd
model_input = pd.DataFrame([range(10)])
loaded_model1 = mlflow.pyfunc.load_model("custom_model1")
print(f"Model1: {loaded_model1.predict(model_input)}")
loaded_model2 = mlflow.pyfunc.load_model("custom_model2")
print(f"Model2: {loaded_model2.predict(model_input)}")
```
The output should be 1 for custom_model1 and 2 for custom_model2, but because of the bug the output is 1 for both.
### Other info / logs
_No response_
### What component(s) does this bug affect?
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [X] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [ ] `area/server-infra`: MLflow Tracking server backend
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
### What interface(s) does this bug affect?
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
### What language(s) does this bug affect?
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
### What integration(s) does this bug affect?
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
</issue>
<code>
[start of mlflow/utils/model_utils.py]
1 import os
2 import sys
3
4 from mlflow.exceptions import MlflowException
5 from mlflow.models import Model
6 from mlflow.models.model import MLMODEL_FILE_NAME
7 from mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, RESOURCE_ALREADY_EXISTS
8 from mlflow.tracking.artifact_utils import _download_artifact_from_uri
9 from mlflow.utils.uri import append_to_uri_path
10 from mlflow.utils.file_utils import _copy_file_or_tree
11
12 FLAVOR_CONFIG_CODE = "code"
13
14
15 def _get_flavor_configuration(model_path, flavor_name):
16 """
17 Obtains the configuration for the specified flavor from the specified
18 MLflow model path. If the model does not contain the specified flavor,
19 an exception will be thrown.
20
21 :param model_path: The path to the root directory of the MLflow model for which to load
22 the specified flavor configuration.
23 :param flavor_name: The name of the flavor configuration to load.
24 :return: The flavor configuration as a dictionary.
25 """
26 model_configuration_path = os.path.join(model_path, MLMODEL_FILE_NAME)
27 if not os.path.exists(model_configuration_path):
28 raise MlflowException(
29 'Could not find an "{model_file}" configuration file at "{model_path}"'.format(
30 model_file=MLMODEL_FILE_NAME, model_path=model_path
31 ),
32 RESOURCE_DOES_NOT_EXIST,
33 )
34
35 model_conf = Model.load(model_configuration_path)
36 if flavor_name not in model_conf.flavors:
37 raise MlflowException(
38 'Model does not have the "{flavor_name}" flavor'.format(flavor_name=flavor_name),
39 RESOURCE_DOES_NOT_EXIST,
40 )
41 conf = model_conf.flavors[flavor_name]
42 return conf
43
44
45 def _get_flavor_configuration_from_uri(model_uri, flavor_name):
46 """
47 Obtains the configuration for the specified flavor from the specified
48 MLflow model uri. If the model does not contain the specified flavor,
49 an exception will be thrown.
50
51 :param model_uri: The path to the root directory of the MLflow model for which to load
52 the specified flavor configuration.
53 :param flavor_name: The name of the flavor configuration to load.
54 :return: The flavor configuration as a dictionary.
55 """
56 try:
57 ml_model_file = _download_artifact_from_uri(
58 artifact_uri=append_to_uri_path(model_uri, MLMODEL_FILE_NAME)
59 )
60 except Exception as ex:
61 raise MlflowException(
62 'Failed to download an "{model_file}" model file from "{model_uri}": {ex}'.format(
63 model_file=MLMODEL_FILE_NAME, model_uri=model_uri, ex=ex
64 ),
65 RESOURCE_DOES_NOT_EXIST,
66 )
67 model_conf = Model.load(ml_model_file)
68 if flavor_name not in model_conf.flavors:
69 raise MlflowException(
70 'Model does not have the "{flavor_name}" flavor'.format(flavor_name=flavor_name),
71 RESOURCE_DOES_NOT_EXIST,
72 )
73 return model_conf.flavors[flavor_name]
74
75
76 def _get_code_dirs(src_code_path, dst_code_path=None):
77 """
78 Obtains the names of the subdirectories contained under the specified source code
79 path and joins them with the specified destination code path.
80 :param src_code_path: The path of the source code directory for which to list subdirectories.
81 :param dst_code_path: The destination directory path to which subdirectory names should be
82 joined.
83 """
84 if not dst_code_path:
85 dst_code_path = src_code_path
86 return [
87 (os.path.join(dst_code_path, x))
88 for x in os.listdir(src_code_path)
89 if os.path.isdir(os.path.join(src_code_path, x)) and not x == "__pycache__"
90 ]
91
92
93 def _validate_code_paths(code_paths):
94 if code_paths is not None:
95 if not isinstance(code_paths, list):
96 raise TypeError("Argument code_paths should be a list, not {}".format(type(code_paths)))
97
98
99 def _validate_and_copy_code_paths(code_paths, path, default_subpath="code"):
100 """
101 Validates that a code path is a valid list and copies the code paths to a directory. This
102 can later be used to log custom code as an artifact.
103
104 :param code_paths: A list of files or directories containing code that should be logged
105 as artifacts
106 :param path: The local model path.
107 :param default_subpath: The default directory name used to store code artifacts.
108 """
109 _validate_code_paths(code_paths)
110 if code_paths is not None:
111 code_dir_subpath = default_subpath
112 for code_path in code_paths:
113 _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)
114 else:
115 code_dir_subpath = None
116 return code_dir_subpath
117
118
119 def _add_code_to_system_path(code_path):
120 sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
121
122
123 def _validate_and_prepare_target_save_path(path):
124 if os.path.exists(path) and any(os.scandir(path)):
125 raise MlflowException(
126 message="Path '{}' already exists and is not empty".format(path),
127 error_code=RESOURCE_ALREADY_EXISTS,
128 )
129
130 os.makedirs(path, exist_ok=True)
131
132
133 def _add_code_from_conf_to_system_path(local_path, conf, code_key=FLAVOR_CONFIG_CODE):
134 """
135 Checks if any code_paths were logged with the model in the flavor conf and prepends
136 the directory to the system path.
137
138 :param local_path: The local path containing model artifacts.
139 :param conf: The flavor-specific conf that should contain the FLAVOR_CONFIG_CODE
140 key, which specifies the directory containing custom code logged as artifacts.
141 :param code_key: The key used by the flavor to indicate custom code artifacts.
142 By default this is FLAVOR_CONFIG_CODE.
143 """
144 if code_key in conf and conf[code_key]:
145 code_path = os.path.join(local_path, conf[code_key])
146 _add_code_to_system_path(code_path)
147
[end of mlflow/utils/model_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlflow/utils/model_utils.py b/mlflow/utils/model_utils.py
--- a/mlflow/utils/model_utils.py
+++ b/mlflow/utils/model_utils.py
@@ -1,5 +1,6 @@
import os
import sys
+from pathlib import Path
from mlflow.exceptions import MlflowException
from mlflow.models import Model
@@ -118,6 +119,13 @@
def _add_code_to_system_path(code_path):
sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
+ # Delete cached modules so they will get reloaded anew from the correct code path
+ # Otherwise python will use the cached modules
+ modules = [
+ p.stem for p in Path(code_path).rglob("*.py") if p.is_file() and p.name != "__init__.py"
+ ]
+ for module in modules:
+ sys.modules.pop(module, None)
def _validate_and_prepare_target_save_path(path):
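To see why popping entries from `sys.modules` matters, here is a standalone illustration of Python's import caching; the directory names are hypothetical and mirror the reproduction in the issue rather than MLflow's internals:

```python
# Illustration only: Python caches modules by name in sys.modules, so a
# second import of the same name returns the cached module even though
# sys.path now prefers a different code directory.
import sys

sys.path.insert(0, "custom_model1/code")
import prep_features                          # loads custom_model1's copy
print(prep_features.preprocess_features())    # -> 1

sys.path.insert(0, "custom_model2/code")
import prep_features                          # cache hit: still model 1's module
print(prep_features.preprocess_features())    # -> 1 (the bug)

sys.modules.pop("prep_features", None)        # what the patch does per module name
import prep_features                          # re-imported from custom_model2/code
print(prep_features.preprocess_features())    # -> 2
```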
|
{"golden_diff": "diff --git a/mlflow/utils/model_utils.py b/mlflow/utils/model_utils.py\n--- a/mlflow/utils/model_utils.py\n+++ b/mlflow/utils/model_utils.py\n@@ -1,5 +1,6 @@\n import os\n import sys\n+from pathlib import Path\n \n from mlflow.exceptions import MlflowException\n from mlflow.models import Model\n@@ -118,6 +119,13 @@\n \n def _add_code_to_system_path(code_path):\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n+ # Delete cached modules so they will get reloaded anew from the correct code path\n+ # Otherwise python will use the cached modules\n+ modules = [\n+ p.stem for p in Path(code_path).rglob(\"*.py\") if p.is_file() and p.name != \"__init__.py\"\n+ ]\n+ for module in modules:\n+ sys.modules.pop(module, None)\n \n \n def _validate_and_prepare_target_save_path(path):\n", "issue": "Two models one code name \n### Willingness to contribute\r\n\r\nYes. I can contribute a fix for this bug independently.\r\n\r\n### System information\r\n\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: custom code\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: 20.04\r\n- **MLflow installed from (source or binary)**: source\r\n- **MLflow version (run ``mlflow --version``)**: 1.23.1\r\n- **Python version**: 3.9\r\n- **yarn version, if running the dev UI**: N/A\r\n\r\n\r\n### Describe the problem\r\n\r\nWhen two mlflow models have the same name for the code packaged with them, for example: a preprocessing code name `prep_features` was packaged with two different mlflow models. If the first mlflow model is loaded, then the second one is also loaded, python will only load the `prep_features` module once. This leads to problems if `prep_features` for the second model is different from the first model.\r\n\r\n\r\n\r\n### Tracking information\r\n\r\n_No response_\r\n\r\n### Code to reproduce issue\r\n\r\nprep_features. py change 1 to 2 when creating custom_model2 with the 2m1cn.py script\r\n```python\r\ndef preprocess_features():\r\n return 1 # 2 for custom_model2\r\n```\r\n2m1cn.py use this file to create custom_model1 and custom_model2 \r\n```python\r\nimport mlflow.pyfunc\r\n\r\nclass CM(mlflow.pyfunc.PythonModel):\r\n def __init__(self):\r\n pass\r\n def predict(self, context, model_input):\r\n from prep_features import preprocess_features\r\n return preprocess_features()\r\n\r\n# Construct and save the model\r\nmodel_path = \"custom_model1\" # \"custom_model2\"\r\ncode_path = [\"prep_features.py\"]\r\ncustom_model = CM()\r\nmlflow.pyfunc.save_model(path=model_path, python_model=custom_model, code_path=code_path)\r\n```\r\n\r\ntest.py now use this file to test your created models\r\n```python\r\n# Evaluate the models\r\nimport mlflow.pyfunc\r\nimport pandas as pd\r\nmodel_input = pd.DataFrame([range(10)])\r\nloaded_model1 = mlflow.pyfunc.load_model(\"custom_model1\")\r\nprint(f\"Model1: {loaded_model1.predict(model_input)}\")\r\nloaded_model2 = mlflow.pyfunc.load_model(\"custom_model2\")\r\nprint(f\"Model2: {loaded_model2.predict(model_input)}\")\r\n```\r\n\r\nThe output should be 1 for custom_model1 and 2 for custom_model 2. 
But because of the bug the output is 1 for both.\r\n\r\n### Other info / logs\r\n\r\n_No response_\r\n\r\n### What component(s) does this bug affect?\r\n\r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [X] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [ ] `area/server-infra`: MLflow Tracking server backend\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\n### What interface(s) does this bug affect?\r\n\r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\n### What language(s) does this bug affect?\r\n\r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\n### What integration(s) does this bug affect?\r\n\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "import os\nimport sys\n\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.models import Model\nfrom mlflow.models.model import MLMODEL_FILE_NAME\nfrom mlflow.protos.databricks_pb2 import RESOURCE_DOES_NOT_EXIST, RESOURCE_ALREADY_EXISTS\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.uri import append_to_uri_path\nfrom mlflow.utils.file_utils import _copy_file_or_tree\n\nFLAVOR_CONFIG_CODE = \"code\"\n\n\ndef _get_flavor_configuration(model_path, flavor_name):\n \"\"\"\n Obtains the configuration for the specified flavor from the specified\n MLflow model path. If the model does not contain the specified flavor,\n an exception will be thrown.\n\n :param model_path: The path to the root directory of the MLflow model for which to load\n the specified flavor configuration.\n :param flavor_name: The name of the flavor configuration to load.\n :return: The flavor configuration as a dictionary.\n \"\"\"\n model_configuration_path = os.path.join(model_path, MLMODEL_FILE_NAME)\n if not os.path.exists(model_configuration_path):\n raise MlflowException(\n 'Could not find an \"{model_file}\" configuration file at \"{model_path}\"'.format(\n model_file=MLMODEL_FILE_NAME, model_path=model_path\n ),\n RESOURCE_DOES_NOT_EXIST,\n )\n\n model_conf = Model.load(model_configuration_path)\n if flavor_name not in model_conf.flavors:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=flavor_name),\n RESOURCE_DOES_NOT_EXIST,\n )\n conf = model_conf.flavors[flavor_name]\n return conf\n\n\ndef _get_flavor_configuration_from_uri(model_uri, flavor_name):\n \"\"\"\n Obtains the configuration for the specified flavor from the specified\n MLflow model uri. 
If the model does not contain the specified flavor,\n an exception will be thrown.\n\n :param model_uri: The path to the root directory of the MLflow model for which to load\n the specified flavor configuration.\n :param flavor_name: The name of the flavor configuration to load.\n :return: The flavor configuration as a dictionary.\n \"\"\"\n try:\n ml_model_file = _download_artifact_from_uri(\n artifact_uri=append_to_uri_path(model_uri, MLMODEL_FILE_NAME)\n )\n except Exception as ex:\n raise MlflowException(\n 'Failed to download an \"{model_file}\" model file from \"{model_uri}\": {ex}'.format(\n model_file=MLMODEL_FILE_NAME, model_uri=model_uri, ex=ex\n ),\n RESOURCE_DOES_NOT_EXIST,\n )\n model_conf = Model.load(ml_model_file)\n if flavor_name not in model_conf.flavors:\n raise MlflowException(\n 'Model does not have the \"{flavor_name}\" flavor'.format(flavor_name=flavor_name),\n RESOURCE_DOES_NOT_EXIST,\n )\n return model_conf.flavors[flavor_name]\n\n\ndef _get_code_dirs(src_code_path, dst_code_path=None):\n \"\"\"\n Obtains the names of the subdirectories contained under the specified source code\n path and joins them with the specified destination code path.\n :param src_code_path: The path of the source code directory for which to list subdirectories.\n :param dst_code_path: The destination directory path to which subdirectory names should be\n joined.\n \"\"\"\n if not dst_code_path:\n dst_code_path = src_code_path\n return [\n (os.path.join(dst_code_path, x))\n for x in os.listdir(src_code_path)\n if os.path.isdir(os.path.join(src_code_path, x)) and not x == \"__pycache__\"\n ]\n\n\ndef _validate_code_paths(code_paths):\n if code_paths is not None:\n if not isinstance(code_paths, list):\n raise TypeError(\"Argument code_paths should be a list, not {}\".format(type(code_paths)))\n\n\ndef _validate_and_copy_code_paths(code_paths, path, default_subpath=\"code\"):\n \"\"\"\n Validates that a code path is a valid list and copies the code paths to a directory. 
This\n can later be used to log custom code as an artifact.\n\n :param code_paths: A list of files or directories containing code that should be logged\n as artifacts\n :param path: The local model path.\n :param default_subpath: The default directory name used to store code artifacts.\n \"\"\"\n _validate_code_paths(code_paths)\n if code_paths is not None:\n code_dir_subpath = default_subpath\n for code_path in code_paths:\n _copy_file_or_tree(src=code_path, dst=path, dst_dir=code_dir_subpath)\n else:\n code_dir_subpath = None\n return code_dir_subpath\n\n\ndef _add_code_to_system_path(code_path):\n sys.path = [code_path] + _get_code_dirs(code_path) + sys.path\n\n\ndef _validate_and_prepare_target_save_path(path):\n if os.path.exists(path) and any(os.scandir(path)):\n raise MlflowException(\n message=\"Path '{}' already exists and is not empty\".format(path),\n error_code=RESOURCE_ALREADY_EXISTS,\n )\n\n os.makedirs(path, exist_ok=True)\n\n\ndef _add_code_from_conf_to_system_path(local_path, conf, code_key=FLAVOR_CONFIG_CODE):\n \"\"\"\n Checks if any code_paths were logged with the model in the flavor conf and prepends\n the directory to the system path.\n\n :param local_path: The local path containing model artifacts.\n :param conf: The flavor-specific conf that should contain the FLAVOR_CONFIG_CODE\n key, which specifies the directory containing custom code logged as artifacts.\n :param code_key: The key used by the flavor to indicate custom code artifacts.\n By default this is FLAVOR_CONFIG_CODE.\n \"\"\"\n if code_key in conf and conf[code_key]:\n code_path = os.path.join(local_path, conf[code_key])\n _add_code_to_system_path(code_path)\n", "path": "mlflow/utils/model_utils.py"}]}
| 3,110 | 217 |
gh_patches_debug_3636 | rasdani/github-patches | git_diff | safe-global__safe-config-service-68 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add /check endpoint
This endpoint should be used to check if the server is alive and ready to receive requests
</issue>
<code>
[start of src/config/urls.py]
1 from django.contrib import admin
2 from django.urls import include, path
3
4 urlpatterns = [
5 path("api/v1/", include("safe_apps.urls", namespace="v1")),
6 path("admin/", admin.site.urls),
7 ]
8
[end of src/config/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/config/urls.py b/src/config/urls.py
--- a/src/config/urls.py
+++ b/src/config/urls.py
@@ -1,7 +1,9 @@
from django.contrib import admin
+from django.http import HttpResponse
from django.urls import include, path
urlpatterns = [
path("api/v1/", include("safe_apps.urls", namespace="v1")),
path("admin/", admin.site.urls),
+ path("check/", lambda request: HttpResponse("Ok"), name="check"),
]
|
{"golden_diff": "diff --git a/src/config/urls.py b/src/config/urls.py\n--- a/src/config/urls.py\n+++ b/src/config/urls.py\n@@ -1,7 +1,9 @@\n from django.contrib import admin\n+from django.http import HttpResponse\n from django.urls import include, path\n \n urlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"admin/\", admin.site.urls),\n+ path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\n ]\n", "issue": "Add /check endpoint\nThis endpoint should be used to check if the server is alive and ready to receive requests\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"admin/\", admin.site.urls),\n]\n", "path": "src/config/urls.py"}]}
| 612 | 111 |
gh_patches_debug_24160
|
rasdani/github-patches
|
git_diff
|
voxel51__fiftyone-712
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOCUMENTATION] Add notebook usage options to tutorials/recipes
Analogous to the notebooks in the TF docs, eg https://www.tensorflow.org/guide/data, we should add buttons to the top of the notebooks in our docs (currently, recipes and tutorials) that allow users to
- Run in Colab
- View source on GitHub
- Download notebook
Per the [raw source](https://raw.githubusercontent.com/tensorflow/docs/master/site/en/guide/data.ipynb) of the [github instance](https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb) of the TF example above, it looks like they're achieving this by adding an HTML cell with content as follows:
```html
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/data"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/data.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
```
</issue>
<code>
[start of docs/source/conf.py]
1 """
2 Sphinx configuration file.
3
4 For a full list of available options, see:
5 https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 | Copyright 2017-2020, Voxel51, Inc.
8 | `voxel51.com <https://voxel51.com/>`_
9 |
10 """
11 import os
12 import re
13 import sys
14
15 sys.path.insert(0, os.path.abspath("."))
16
17 from custom_directives import (
18 CustomButtonDirective,
19 CustomCalloutItemDirective,
20 CustomCardItemDirective,
21 )
22 from redirects import generate_redirects
23
24 import fiftyone.constants as foc
25
26
27 with open("../../setup.py") as f:
28 setup_version = re.search(r'VERSION = "(.+?)"', f.read()).group(1)
29
30 if setup_version != foc.VERSION:
31 raise RuntimeError(
32 "FiftyOne version in setup.py (%r) does not match installed version "
33 "(%r). If this is a dev install, reinstall with `pip install -e .` "
34 "and try again." % (setup_version, foc.VERSION)
35 )
36
37
38 # -- Path setup --------------------------------------------------------------
39
40 # If extensions (or modules to document with autodoc) are in another directory,
41 # add these directories to sys.path here. If the directory is relative to the
42 # documentation root, use os.path.abspath to make it absolute, like shown here.
43 #
44
45
46 # -- Project information -----------------------------------------------------
47
48 project = "FiftyOne"
49 copyright = foc.COPYRIGHT
50 author = foc.AUTHOR
51 release = foc.VERSION
52
53
54 # -- General configuration ---------------------------------------------------
55
56 # Add any Sphinx extension module names here, as strings. They can be
57 # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
58 # ones.
59 extensions = [
60 "sphinx.ext.autodoc",
61 "sphinx.ext.intersphinx",
62 "sphinx.ext.napoleon",
63 "nbsphinx",
64 "sphinx_tabs.tabs",
65 "sphinx_copybutton",
66 "autodocsumm",
67 ]
68
69 # Types of class members to generate documentation for.
70 autodoc_default_options = {
71 "members": True,
72 "inherited-members": True,
73 "member-order": "bysource",
74 "autosummary": True,
75 "autosummary-no-nesting": True,
76 }
77 autodoc_inherit_docstrings = True
78 autoclass_content = "class"
79
80 # Add any paths that contain templates here, relative to this directory.
81 templates_path = ["_templates"]
82
83 # The suffix(es) of source filenames.
84 # You can specify multiple suffix as a list of strings.
85 source_suffix = [".rst", ".md"]
86
87 # List of patterns, relative to source directory, that match files and
88 # directories to ignore when looking for source files.
89 # This pattern also affects html_static_path and html_extra_path.
90 exclude_patterns = ["_includes"]
91
92 # A string of reStructuredText that will be included at the beginning of every
93 # source file that is read
94 rst_prolog = """
95 .. include:: /_includes/substitutions.rst
96 """
97
98 # Disable nbshinx loading require.js - this breaks the pytorch theme's
99 # scrolling handling, and we don't appear to have any notebook content that
100 # requires it
101 nbsphinx_requirejs_path = ""
102
103 # Adds a link to download the notebook to the built HTML
104 nbsphinx_prolog = """
105
106 .. note::
107
108 Download notebook:
109 :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`
110
111 """
112
113 # Path to the redirects file, relative to `source/`
114 redirects_file = "redirects"
115
116 # -- Options for HTML output -------------------------------------------------
117
118 # The theme to use for HTML and HTML Help pages. See the documentation for
119 # a list of builtin themes.
120 #
121 html_theme = "pytorch_sphinx_theme"
122 html_theme_path = ["../theme"]
123 html_theme_options = {
124 "pytorch_project": "docs",
125 }
126
127 # Add any paths that contain custom static files (such as style sheets) here,
128 # relative to this directory. They are copied after the builtin static files,
129 # so a file named "default.css" will overwrite the builtin "default.css".
130 html_static_path = ["_static"]
131
132 # These paths are either relative to html_static_path
133 # or fully qualified paths (eg. https://...)
134 html_css_files = ["css/voxel51-website.css", "css/custom.css"]
135 html_js_files = ["js/voxel51-website.js", "js/custom.js"]
136
137 # Prevent RST source files from being included in output
138 html_copy_source = False
139
140 html_context = {
141 "address_main_line1": "410 N 4th Ave, 3rd Floor",
142 "address_main_line2": "Ann Arbor, MI 48104",
143 "phone_main": "+1 734-489-1134",
144 "email_info": "[email protected]",
145 # Links - copied from website config
146 "link_blog": "https://blog.voxel51.com/",
147 "link_contactus": "mailto:[email protected]?subject=[Voxel51]%20Contact%20us",
148 "link_docs_fiftyone": "https://voxel51.com/docs/fiftyone/",
149 "link_fiftyone": "https://voxel51.com/fiftyone/",
150 "link_fiftyone_tutorials": "https://voxel51.com/docs/fiftyone/tutorials/index.html",
151 "link_fiftyone_examples": "https://github.com/voxel51/fiftyone-examples",
152 "link_github": "https://github.com/",
153 "link_home": "https://voxel51.com/",
154 "link_linkedin": "https://www.linkedin.com/in/",
155 "link_ourstory": "https://voxel51.com/ourstory/",
156 "link_press": "https://voxel51.com/press/",
157 "link_privacypolicy": "https://voxel51.com/privacy/",
158 "link_schedulecall": "mailto:[email protected]?subject=[Voxel51]%20Schedule%20a%20call",
159 "link_scheduledemo": "https://meetings.hubspot.com/michael908",
160 "link_termsofservice": "https://voxel51.com/terms/",
161 "link_twitter": "https://twitter.com/",
162 "link_usecase_advertising": "https://voxel51.com/usecases/advertising/",
163 "link_usecase_auto": "https://voxel51.com/usecases/automotive/",
164 "link_usecase_research": "https://voxel51.com/usecases/research/",
165 "link_usecases": "https://voxel51.com/usecases/",
166 "link_usecases_entry": "https://voxel51.com/usecases/automotive/",
167 "link_voxel51_facebook": "https://www.facebook.com/voxel51/",
168 "link_voxel51_github": "https://github.com/voxel51/",
169 "link_voxel51_linkedin": "https://www.linkedin.com/company/voxel51/",
170 "link_voxel51_slack": "https://join.slack.com/t/fiftyone-users/shared_invite/zt-gtpmm76o-9AjvzNPBOzevBySKzt02gg",
171 "link_voxel51_twitter": "https://twitter.com/voxel51",
172 "link_voxel51_medium": "https://medium.com/voxel51/",
173 # Banner - set to None or empty string to disable
174 # "banner_link": "https://us02web.zoom.us/meeting/register/tZ0kd-GppzsrHNeJEEYcCKsE_t_lI_7iB6Sy",
175 # "banner_message": "Register for <b>the biweekly FiftyOne webinar series</b> on Wednesdays at 1PM EST!",
176 "banner_link": None,
177 "banner_message": None,
178 }
179
180 # -- Custom app setup --------------------------------------------------------
181
182
183 def setup(app):
184 # Generate page redirects
185 app.add_config_value("redirects_file", "redirects", "env")
186 app.connect("builder-inited", generate_redirects)
187
188 # Custom directives
189 app.add_directive("custombutton", CustomButtonDirective)
190 app.add_directive("customcalloutitem", CustomCalloutItemDirective)
191 app.add_directive("customcarditem", CustomCardItemDirective)
192
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -100,15 +100,35 @@
# requires it
nbsphinx_requirejs_path = ""
-# Adds a link to download the notebook to the built HTML
+# Adds helpful external links to the built HTML
+ref = os.environ.get("FO_DOCS_VERSION_REF", "develop")
nbsphinx_prolog = """
-.. note::
-
- Download notebook:
- :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`
-
-"""
+.. raw:: html
+
+ <table class="fo-notebook-links" align="left">
+ <td>
+ <a target="_blank" href="https://colab.research.google.com/github/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}">
+ <img src="../_static/images/logo/colab-logo-32px.png"/>Run in Google Colab
+ </a>
+ </td>
+ <td>
+ <a target="_blank" href="https://github.com/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}">
+ <img src="../_static/images/logo/github-logo-32px.png"/>View source on GitHub
+ </a>
+ </td>
+ <td>
+ <a target="_blank" href="https://gitcdn.link/repo/voxel51/fiftyone/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}" download>
+ <img src="../_static/images/logo/download-logo-32px.png"/>Download notebook
+ </a>
+ </td>
+ </table>
+
+""" % (
+ ref,
+ ref,
+ ref,
+)
# Path to the redirects file, relative to `source/`
redirects_file = "redirects"
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -100,15 +100,35 @@\n # requires it\n nbsphinx_requirejs_path = \"\"\n \n-# Adds a link to download the notebook to the built HTML\n+# Adds helpful external links to the built HTML\n+ref = os.environ.get(\"FO_DOCS_VERSION_REF\", \"develop\")\n nbsphinx_prolog = \"\"\"\n \n-.. note::\n-\n- Download notebook:\n- :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`\n-\n-\"\"\"\n+.. raw:: html\n+\n+ <table class=\"fo-notebook-links\" align=\"left\">\n+ <td>\n+ <a target=\"_blank\" href=\"https://colab.research.google.com/github/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\">\n+ <img src=\"../_static/images/logo/colab-logo-32px.png\"/>Run in Google Colab\n+ </a>\n+ </td>\n+ <td>\n+ <a target=\"_blank\" href=\"https://github.com/voxel51/fiftyone/blob/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\">\n+ <img src=\"../_static/images/logo/github-logo-32px.png\"/>View source on GitHub\n+ </a>\n+ </td>\n+ <td>\n+ <a target=\"_blank\" href=\"https://gitcdn.link/repo/voxel51/fiftyone/%s/docs/source/{{ env.doc2path(env.docname, base=None) }}\" download>\n+ <img src=\"../_static/images/logo/download-logo-32px.png\"/>Download notebook\n+ </a>\n+ </td>\n+ </table>\n+\n+\"\"\" % (\n+ ref,\n+ ref,\n+ ref,\n+)\n \n # Path to the redirects file, relative to `source/`\n redirects_file = \"redirects\"\n", "issue": "[DOCUMENTATION] Add notebook usage options to tutorials/recipes\nAnalogous to the notebooks in the TF docs, eg https://www.tensorflow.org/guide/data, we should add buttons to the top of the notebooks in our docs (currently, recipes and tutorials) that allow users to\r\n- Run in Colab\r\n- View source on GitHub\r\n- Download notebook\r\n\r\nPer the [raw source](https://raw.githubusercontent.com/tensorflow/docs/master/site/en/guide/data.ipynb) of the [github instance](https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb) of the TF example above, it looks like they're achieving this by adding an HTML cell with content as follows:\r\n\r\n```html\r\n<table class=\"tfo-notebook-buttons\" align=\"left\">\r\n <td>\r\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/data\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\r\n </td>\r\n <td>\r\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/data.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\r\n </td>\r\n <td>\r\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/data.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\r\n </td>\r\n <td>\r\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/data.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\r\n </td>\r\n</table>\r\n```\n", "before_files": [{"content": "\"\"\"\nSphinx configuration file.\n\nFor a full list of available options, see:\nhttps://www.sphinx-doc.org/en/master/usage/configuration.html\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\nfrom custom_directives import (\n CustomButtonDirective,\n 
CustomCalloutItemDirective,\n CustomCardItemDirective,\n)\nfrom redirects import generate_redirects\n\nimport fiftyone.constants as foc\n\n\nwith open(\"../../setup.py\") as f:\n setup_version = re.search(r'VERSION = \"(.+?)\"', f.read()).group(1)\n\nif setup_version != foc.VERSION:\n raise RuntimeError(\n \"FiftyOne version in setup.py (%r) does not match installed version \"\n \"(%r). If this is a dev install, reinstall with `pip install -e .` \"\n \"and try again.\" % (setup_version, foc.VERSION)\n )\n\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"FiftyOne\"\ncopyright = foc.COPYRIGHT\nauthor = foc.AUTHOR\nrelease = foc.VERSION\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named \"sphinx.ext.*\") or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"nbsphinx\",\n \"sphinx_tabs.tabs\",\n \"sphinx_copybutton\",\n \"autodocsumm\",\n]\n\n# Types of class members to generate documentation for.\nautodoc_default_options = {\n \"members\": True,\n \"inherited-members\": True,\n \"member-order\": \"bysource\",\n \"autosummary\": True,\n \"autosummary-no-nesting\": True,\n}\nautodoc_inherit_docstrings = True\nautoclass_content = \"class\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of strings.\nsource_suffix = [\".rst\", \".md\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_includes\"]\n\n# A string of reStructuredText that will be included at the beginning of every\n# source file that is read\nrst_prolog = \"\"\"\n.. include:: /_includes/substitutions.rst\n\"\"\"\n\n# Disable nbshinx loading require.js - this breaks the pytorch theme's\n# scrolling handling, and we don't appear to have any notebook content that\n# requires it\nnbsphinx_requirejs_path = \"\"\n\n# Adds a link to download the notebook to the built HTML\nnbsphinx_prolog = \"\"\"\n\n.. note::\n\n Download notebook:\n :download:`{{ env.doc2path(env.docname, base=None) }} </{{ env.doc2path(env.docname, base=None) }}>`\n\n\"\"\"\n\n# Path to the redirects file, relative to `source/`\nredirects_file = \"redirects\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [\"../theme\"]\nhtml_theme_options = {\n \"pytorch_project\": \"docs\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# These paths are either relative to html_static_path\n# or fully qualified paths (eg. https://...)\nhtml_css_files = [\"css/voxel51-website.css\", \"css/custom.css\"]\nhtml_js_files = [\"js/voxel51-website.js\", \"js/custom.js\"]\n\n# Prevent RST source files from being included in output\nhtml_copy_source = False\n\nhtml_context = {\n \"address_main_line1\": \"410 N 4th Ave, 3rd Floor\",\n \"address_main_line2\": \"Ann Arbor, MI 48104\",\n \"phone_main\": \"+1 734-489-1134\",\n \"email_info\": \"[email protected]\",\n # Links - copied from website config\n \"link_blog\": \"https://blog.voxel51.com/\",\n \"link_contactus\": \"mailto:[email protected]?subject=[Voxel51]%20Contact%20us\",\n \"link_docs_fiftyone\": \"https://voxel51.com/docs/fiftyone/\",\n \"link_fiftyone\": \"https://voxel51.com/fiftyone/\",\n \"link_fiftyone_tutorials\": \"https://voxel51.com/docs/fiftyone/tutorials/index.html\",\n \"link_fiftyone_examples\": \"https://github.com/voxel51/fiftyone-examples\",\n \"link_github\": \"https://github.com/\",\n \"link_home\": \"https://voxel51.com/\",\n \"link_linkedin\": \"https://www.linkedin.com/in/\",\n \"link_ourstory\": \"https://voxel51.com/ourstory/\",\n \"link_press\": \"https://voxel51.com/press/\",\n \"link_privacypolicy\": \"https://voxel51.com/privacy/\",\n \"link_schedulecall\": \"mailto:[email protected]?subject=[Voxel51]%20Schedule%20a%20call\",\n \"link_scheduledemo\": \"https://meetings.hubspot.com/michael908\",\n \"link_termsofservice\": \"https://voxel51.com/terms/\",\n \"link_twitter\": \"https://twitter.com/\",\n \"link_usecase_advertising\": \"https://voxel51.com/usecases/advertising/\",\n \"link_usecase_auto\": \"https://voxel51.com/usecases/automotive/\",\n \"link_usecase_research\": \"https://voxel51.com/usecases/research/\",\n \"link_usecases\": \"https://voxel51.com/usecases/\",\n \"link_usecases_entry\": \"https://voxel51.com/usecases/automotive/\",\n \"link_voxel51_facebook\": \"https://www.facebook.com/voxel51/\",\n \"link_voxel51_github\": \"https://github.com/voxel51/\",\n \"link_voxel51_linkedin\": \"https://www.linkedin.com/company/voxel51/\",\n \"link_voxel51_slack\": \"https://join.slack.com/t/fiftyone-users/shared_invite/zt-gtpmm76o-9AjvzNPBOzevBySKzt02gg\",\n \"link_voxel51_twitter\": \"https://twitter.com/voxel51\",\n \"link_voxel51_medium\": \"https://medium.com/voxel51/\",\n # Banner - set to None or empty string to disable\n # \"banner_link\": \"https://us02web.zoom.us/meeting/register/tZ0kd-GppzsrHNeJEEYcCKsE_t_lI_7iB6Sy\",\n # \"banner_message\": \"Register for <b>the biweekly FiftyOne webinar series</b> on Wednesdays at 1PM EST!\",\n \"banner_link\": None,\n \"banner_message\": None,\n}\n\n# -- Custom app setup --------------------------------------------------------\n\n\ndef setup(app):\n # Generate page redirects\n app.add_config_value(\"redirects_file\", \"redirects\", \"env\")\n app.connect(\"builder-inited\", generate_redirects)\n\n # Custom directives\n app.add_directive(\"custombutton\", CustomButtonDirective)\n app.add_directive(\"customcalloutitem\", CustomCalloutItemDirective)\n app.add_directive(\"customcarditem\", CustomCardItemDirective)\n", "path": "docs/source/conf.py"}]}
| 3,279 | 462 |
gh_patches_debug_43055
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-10279
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up previously generated CSV files
<!--
Instructions:
* Fill out the sections below, replace …'s with information about your issue
* Use the 'preview' function above this text box to verify formatting before submitting
-->
## Observed behavior
When a new CSV file is generated for session and summary logs, the previously generated files that are stored in the user's database are no longer accessible for download in the UI. A record of the new CSV file request is saved to the logger model `GenerateCSVLogRequest` which contains the facility id, log type (session or summary), the selected start date, the selected end date, and the date that the request was made. There should only ever be one record for each facility and log type combination. The information in this model is used to scan the database folder `log_exports` and retrieve the previously generated file for download.
Note: For session and summary log files, the file naming convention has been updated to include a date-range selection, with the dates formatted as `YYYY-MM-DD`.
Previous naming convention:
- `{facility_name}_{last 4 digits of facility ID}_{log export type}.csv`
Current naming convention:
- `{facility_name}_{last 4 digits of facility ID}_{log export type}_from_{start date}_to_{end date}.csv`
## Expected behavior
We should remove any files that do not have a record in the database. One way to achieve this is by scanning the folder `log_export`, which contains all exported log files, using the predictable file naming convention, and cross-referencing them with the file records stored in the database. Any session or summary files that do not have a corresponding record in the database should be deleted.
With a 1:1 correlation with the `GenerateCSVLogRequest` record, the database should only contain one exported file for each session and summary log type. A new async task should be created for performing this cleanup, and can be queued whenever a new log export is generated. If the task has a predictable id, it can be ensured that there is only one cleanup task going at once.
More details on the task system can be found [here](https://kolibri-dev.readthedocs.io/en/develop/backend_architecture/tasks.html).
## User-facing consequences
Ensures the database is free from redundant files and optimizes the user's database storage.
## Acceptance Criteria
- [ ] All previously generated session and summary log file exports that lack a corresponding record in the database have been deleted.
- [ ] The previously generated CSV file should be automatically removed from the database upon generating a new one.
- [ ] New python code should be covered by unit tests.
</issue>
<code>
[start of kolibri/core/logger/tasks.py]
1 import os
2
3 from django.core.management import call_command
4 from rest_framework import serializers
5
6 from kolibri.core.auth.models import Facility
7 from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES
8 from kolibri.core.tasks.decorators import register_task
9 from kolibri.core.tasks.permissions import IsAdminForJob
10 from kolibri.core.tasks.validation import JobValidator
11 from kolibri.utils import conf
12
13
14 def get_filepath(log_type, facility_id, start_date, end_date):
15 facility = Facility.objects.get(id=facility_id)
16 logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
17 if not os.path.isdir(logs_dir):
18 os.mkdir(logs_dir)
19 filepath = os.path.join(
20 logs_dir,
21 CSV_EXPORT_FILENAMES[log_type].format(
22 facility.name, facility.id[:4], start_date[:10], end_date[:10]
23 ),
24 )
25 return filepath
26
27
28 class ExportLogCSVValidator(JobValidator):
29 facility = serializers.PrimaryKeyRelatedField(
30 queryset=Facility.objects.all(), required=False
31 )
32 start_date = serializers.CharField()
33 end_date = serializers.CharField()
34 locale = serializers.CharField(required=False)
35
36 def validate(self, data):
37 facility = data.get("facility", None)
38 start_date = data.get("start_date", None)
39 end_date = data.get("end_date", None)
40 locale = data.get("locale", None)
41
42 if facility is None and "user" in self.context:
43 facility = self.context["user"].facility
44 elif facility is None:
45 raise serializers.ValidationError(
46 "Facility must be specified when no user is available."
47 )
48 if not start_date or not end_date:
49 raise serializers.ValidationError(
50 "Start {} and End {} date values are required.".format(
51 start_date, end_date
52 )
53 )
54 kwargs = {
55 "facility": facility.id,
56 "start_date": start_date,
57 "end_date": end_date,
58 "locale": locale,
59 }
60 return {
61 "facility_id": facility.id,
62 "kwargs": kwargs,
63 "args": [facility.id],
64 }
65
66
67 def _exportlogcsv(log_type, facility_id, start_date, end_date, locale):
68 filepath = get_filepath(log_type, facility_id, start_date, end_date)
69 call_command(
70 "exportlogs",
71 log_type=log_type,
72 output_file=filepath,
73 facility=facility_id,
74 overwrite=True,
75 start_date=start_date,
76 end_date=end_date,
77 locale=locale,
78 )
79
80
81 @register_task(
82 validator=ExportLogCSVValidator,
83 track_progress=True,
84 permission_classes=[IsAdminForJob],
85 )
86 def exportsessionlogcsv(facility_id, **kwargs):
87 """
88 Dumps in csv format the content session logs.
89
90 :param: facility.
91 """
92 _exportlogcsv(
93 "session",
94 facility_id,
95 kwargs.get("start_date"),
96 kwargs.get("end_date"),
97 kwargs.get("locale"),
98 )
99
100
101 @register_task(
102 validator=ExportLogCSVValidator,
103 track_progress=True,
104 permission_classes=[IsAdminForJob],
105 )
106 def exportsummarylogcsv(facility_id, **kwargs):
107 """
108 Dumps in csv format the content summary logs.
109
110 :param: facility.
111 """
112 _exportlogcsv(
113 "summary",
114 facility_id,
115 kwargs.get("start_date"),
116 kwargs.get("end_date"),
117 kwargs.get("locale"),
118 )
119
[end of kolibri/core/logger/tasks.py]
[start of kolibri/core/logger/management/commands/exportlogs.py]
1 import logging
2 import ntpath
3 import os
4
5 from dateutil import parser
6 from django.conf import settings
7 from django.core.management.base import CommandError
8 from django.utils import translation
9
10 from kolibri.core.auth.constants.commands_errors import FILE_WRITE_ERROR
11 from kolibri.core.auth.constants.commands_errors import INVALID
12 from kolibri.core.auth.constants.commands_errors import MESSAGES
13 from kolibri.core.auth.constants.commands_errors import NO_FACILITY
14 from kolibri.core.auth.models import Facility
15 from kolibri.core.logger.csv_export import classes_info
16 from kolibri.core.logger.csv_export import csv_file_generator
17 from kolibri.core.logger.models import GenerateCSVLogRequest
18 from kolibri.core.tasks.management.commands.base import AsyncCommand
19 from kolibri.core.tasks.utils import get_current_job
20 from kolibri.utils.time_utils import local_now
21
22 logger = logging.getLogger(__name__)
23
24
25 class Command(AsyncCommand):
26 def add_arguments(self, parser):
27 parser.add_argument(
28 "-O",
29 "--output-file",
30 action="store",
31 dest="output_file",
32 default=None,
33 type=str,
34 help="The generated file will be saved with this name",
35 )
36 parser.add_argument(
37 "-l",
38 "--log-type",
39 action="store",
40 dest="log_type",
41 default="session",
42 choices=classes_info.keys(),
43 help='Log type to be exported. Valid values are "session" and "summary".',
44 )
45 parser.add_argument(
46 "-w",
47 "--overwrite",
48 action="store_true",
49 dest="overwrite",
50 default=False,
51 help="Allows overwritten of the exported file in case it exists",
52 )
53 parser.add_argument(
54 "--facility",
55 action="store",
56 type=str,
57 help="Facility id to import the users into",
58 )
59 parser.add_argument(
60 "--locale",
61 action="store",
62 type=str,
63 default=None,
64 help="Code of the language for the messages to be translated",
65 )
66 parser.add_argument(
67 "--start_date",
68 action="store",
69 dest="start_date",
70 type=str,
71 help="Start date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS",
72 )
73 parser.add_argument(
74 "--end_date",
75 action="store",
76 dest="end_date",
77 type=str,
78 help="End date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS",
79 )
80
81 def get_facility(self, options):
82 if options["facility"]:
83 default_facility = Facility.objects.get(pk=options["facility"])
84 else:
85 default_facility = Facility.get_default_facility()
86
87 return default_facility
88
89 def validate_date(self, date_str):
90 try:
91 return bool(parser.parse(date_str))
92 except ValueError:
93 return False
94
95 def handle_async(self, *args, **options):
96
97 # set language for the translation of the messages
98 locale = settings.LANGUAGE_CODE if not options["locale"] else options["locale"]
99 translation.activate(locale)
100 self.overall_error = ""
101 job = get_current_job()
102
103 start_date = options["start_date"]
104 end_date = options["end_date"]
105
106 facility = self.get_facility(options)
107 if not facility:
108 self.overall_error = str(MESSAGES[NO_FACILITY])
109
110 elif not self.validate_date(start_date):
111 self.overall_error = str(MESSAGES[INVALID]).format("start_date")
112
113 elif not self.validate_date(end_date):
114 self.overall_error = str(MESSAGES[INVALID]).format("end_date")
115
116 else:
117 log_type = options["log_type"]
118
119 log_info = classes_info[log_type]
120
121 if options["output_file"] is None:
122 filename = log_info["filename"].format(
123 facility.name, facility.id[:4], start_date[:10], end_date[:10]
124 )
125 else:
126 filename = options["output_file"]
127
128 filepath = os.path.join(os.getcwd(), filename)
129
130 queryset = log_info["queryset"]
131
132 total_rows = queryset.count()
133
134 with self.start_progress(total=total_rows) as progress_update:
135 try:
136 for row in csv_file_generator(
137 facility,
138 log_type,
139 filepath,
140 start_date=start_date,
141 end_date=end_date,
142 overwrite=options["overwrite"],
143 ):
144 progress_update(1)
145 except (ValueError, IOError) as e:
146 self.overall_error = str(MESSAGES[FILE_WRITE_ERROR].format(e))
147
148 if job:
149 job.extra_metadata["overall_error"] = self.overall_error
150 self.job.extra_metadata["filename"] = ntpath.basename(filepath)
151 job.save_meta()
152 else:
153 if self.overall_error:
154 raise CommandError(self.overall_error)
155 else:
156 logger.info(
157 "Created csv file {} with {} lines".format(filepath, total_rows)
158 )
159
160 translation.deactivate()
161
162 # create or update record of log request
163 GenerateCSVLogRequest.objects.update_or_create(
164 log_type=log_type,
165 facility=facility,
166 defaults={
167 "selected_start_date": parser.parse(start_date),
168 "selected_end_date": parser.parse(end_date),
169 "date_requested": local_now(),
170 },
171 )
172
[end of kolibri/core/logger/management/commands/exportlogs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/core/logger/management/commands/exportlogs.py b/kolibri/core/logger/management/commands/exportlogs.py
--- a/kolibri/core/logger/management/commands/exportlogs.py
+++ b/kolibri/core/logger/management/commands/exportlogs.py
@@ -15,6 +15,7 @@
from kolibri.core.logger.csv_export import classes_info
from kolibri.core.logger.csv_export import csv_file_generator
from kolibri.core.logger.models import GenerateCSVLogRequest
+from kolibri.core.logger.tasks import log_exports_cleanup
from kolibri.core.tasks.management.commands.base import AsyncCommand
from kolibri.core.tasks.utils import get_current_job
from kolibri.utils.time_utils import local_now
@@ -169,3 +170,4 @@
"date_requested": local_now(),
},
)
+ log_exports_cleanup.enqueue()
diff --git a/kolibri/core/logger/tasks.py b/kolibri/core/logger/tasks.py
--- a/kolibri/core/logger/tasks.py
+++ b/kolibri/core/logger/tasks.py
@@ -3,13 +3,19 @@
from django.core.management import call_command
from rest_framework import serializers
+from kolibri.core.auth.management.commands.bulkexportusers import (
+ CSV_EXPORT_FILENAMES as USER_CSV_EXPORT_FILENAMES,
+)
from kolibri.core.auth.models import Facility
from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES
+from kolibri.core.logger.models import GenerateCSVLogRequest
from kolibri.core.tasks.decorators import register_task
from kolibri.core.tasks.permissions import IsAdminForJob
from kolibri.core.tasks.validation import JobValidator
from kolibri.utils import conf
+LOGS_CLEANUP_JOB_ID = "18"
+
def get_filepath(log_type, facility_id, start_date, end_date):
facility = Facility.objects.get(id=facility_id)
@@ -25,6 +31,53 @@
return filepath
+def get_valid_logs_csv_filenames():
+ """
+ Returns a set of valid filenames that should exist
+ based on the objects stored in GenerateCSVLogRequest.
+ Any other files except these filenames should be removed.
+ """
+ valid_filenames_set = set()
+ log_requests = GenerateCSVLogRequest.objects.all()
+ for log_request in log_requests:
+ full_path = get_filepath(
+ log_request.log_type,
+ log_request.facility_id,
+ log_request.selected_start_date.strftime("%Y-%m-%d"),
+ log_request.selected_end_date.strftime("%Y-%m-%d"),
+ )
+ valid_filenames_set.add(os.path.basename(full_path))
+ return valid_filenames_set
+
+
+def get_valid_users_csv_filenames():
+ """
+ Returns a set of valid filenames that should exist
+ based on the objects stored in Facility.
+ """
+ valid_filenames_set = set()
+ facilities = Facility.objects.values("id", "name").all()
+ for facility in facilities:
+ file_name = USER_CSV_EXPORT_FILENAMES["user"].format(
+ facility["name"], facility["id"][:4]
+ )
+ valid_filenames_set.add(file_name)
+ return valid_filenames_set
+
+
+def get_valid_filenames():
+ """
+ Returns a union set of valid filenames
+ for log exports and users csv exports.
+ These filenames are valid and will not be
+ cleaned from log_exports_cleanup.
+ """
+ valid_logs_filenames = get_valid_logs_csv_filenames()
+ valid_users_filenames = get_valid_users_csv_filenames()
+ valid_filenames_set = valid_logs_filenames.union(valid_users_filenames)
+ return valid_filenames_set
+
+
class ExportLogCSVValidator(JobValidator):
facility = serializers.PrimaryKeyRelatedField(
queryset=Facility.objects.all(), required=False
@@ -116,3 +169,18 @@
kwargs.get("end_date"),
kwargs.get("locale"),
)
+
+
+@register_task(job_id=LOGS_CLEANUP_JOB_ID)
+def log_exports_cleanup():
+ """
+ Cleanup log_exports csv files that does not have
+ related reocord in GenerateCSVLogRequest model
+ """
+ logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
+ if not os.path.isdir(logs_dir):
+ return
+ valid_filenames_set = get_valid_filenames()
+ for filename in os.listdir(logs_dir):
+ if filename not in valid_filenames_set:
+ os.remove(os.path.join(logs_dir, filename))
|
{"golden_diff": "diff --git a/kolibri/core/logger/management/commands/exportlogs.py b/kolibri/core/logger/management/commands/exportlogs.py\n--- a/kolibri/core/logger/management/commands/exportlogs.py\n+++ b/kolibri/core/logger/management/commands/exportlogs.py\n@@ -15,6 +15,7 @@\n from kolibri.core.logger.csv_export import classes_info\n from kolibri.core.logger.csv_export import csv_file_generator\n from kolibri.core.logger.models import GenerateCSVLogRequest\n+from kolibri.core.logger.tasks import log_exports_cleanup\n from kolibri.core.tasks.management.commands.base import AsyncCommand\n from kolibri.core.tasks.utils import get_current_job\n from kolibri.utils.time_utils import local_now\n@@ -169,3 +170,4 @@\n \"date_requested\": local_now(),\n },\n )\n+ log_exports_cleanup.enqueue()\ndiff --git a/kolibri/core/logger/tasks.py b/kolibri/core/logger/tasks.py\n--- a/kolibri/core/logger/tasks.py\n+++ b/kolibri/core/logger/tasks.py\n@@ -3,13 +3,19 @@\n from django.core.management import call_command\n from rest_framework import serializers\n \n+from kolibri.core.auth.management.commands.bulkexportusers import (\n+ CSV_EXPORT_FILENAMES as USER_CSV_EXPORT_FILENAMES,\n+)\n from kolibri.core.auth.models import Facility\n from kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES\n+from kolibri.core.logger.models import GenerateCSVLogRequest\n from kolibri.core.tasks.decorators import register_task\n from kolibri.core.tasks.permissions import IsAdminForJob\n from kolibri.core.tasks.validation import JobValidator\n from kolibri.utils import conf\n \n+LOGS_CLEANUP_JOB_ID = \"18\"\n+\n \n def get_filepath(log_type, facility_id, start_date, end_date):\n facility = Facility.objects.get(id=facility_id)\n@@ -25,6 +31,53 @@\n return filepath\n \n \n+def get_valid_logs_csv_filenames():\n+ \"\"\"\n+ Returns a set of valid filenames that should exist\n+ based on the objects stored in GenerateCSVLogRequest.\n+ Any other files except these filenames should be removed.\n+ \"\"\"\n+ valid_filenames_set = set()\n+ log_requests = GenerateCSVLogRequest.objects.all()\n+ for log_request in log_requests:\n+ full_path = get_filepath(\n+ log_request.log_type,\n+ log_request.facility_id,\n+ log_request.selected_start_date.strftime(\"%Y-%m-%d\"),\n+ log_request.selected_end_date.strftime(\"%Y-%m-%d\"),\n+ )\n+ valid_filenames_set.add(os.path.basename(full_path))\n+ return valid_filenames_set\n+\n+\n+def get_valid_users_csv_filenames():\n+ \"\"\"\n+ Returns a set of valid filenames that should exist\n+ based on the objects stored in Facility.\n+ \"\"\"\n+ valid_filenames_set = set()\n+ facilities = Facility.objects.values(\"id\", \"name\").all()\n+ for facility in facilities:\n+ file_name = USER_CSV_EXPORT_FILENAMES[\"user\"].format(\n+ facility[\"name\"], facility[\"id\"][:4]\n+ )\n+ valid_filenames_set.add(file_name)\n+ return valid_filenames_set\n+\n+\n+def get_valid_filenames():\n+ \"\"\"\n+ Returns a union set of valid filenames\n+ for log exports and users csv exports.\n+ These filenames are valid and will not be\n+ cleaned from log_exports_cleanup.\n+ \"\"\"\n+ valid_logs_filenames = get_valid_logs_csv_filenames()\n+ valid_users_filenames = get_valid_users_csv_filenames()\n+ valid_filenames_set = valid_logs_filenames.union(valid_users_filenames)\n+ return valid_filenames_set\n+\n+\n class ExportLogCSVValidator(JobValidator):\n facility = serializers.PrimaryKeyRelatedField(\n queryset=Facility.objects.all(), required=False\n@@ -116,3 +169,18 @@\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n 
)\n+\n+\n+@register_task(job_id=LOGS_CLEANUP_JOB_ID)\n+def log_exports_cleanup():\n+ \"\"\"\n+ Cleanup log_exports csv files that does not have\n+ related reocord in GenerateCSVLogRequest model\n+ \"\"\"\n+ logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n+ if not os.path.isdir(logs_dir):\n+ return\n+ valid_filenames_set = get_valid_filenames()\n+ for filename in os.listdir(logs_dir):\n+ if filename not in valid_filenames_set:\n+ os.remove(os.path.join(logs_dir, filename))\n", "issue": "Clean up previously generated CSV files\n<!--\r\nInstructions:\r\n * Fill out the sections below, replace \u2026's with information about your issue\r\n * Use the 'preview' function above this text box to verify formatting before submitting\r\n-->\r\n\r\n## Observed behavior\r\nWhen a new CSV file is generated for session and summary logs, the previously generated files that are stored in the user's database are no longer accessible for download in the UI. A record of the new CSV file request is saved to the logger model `GenerateCSVLogRequest` which contains the facility id, log type (session or summary), the selected start date, the selected end date, and the date that the request was made. There should only ever be one record for each facility and log type combination. The information in this modal is used to scan the database folder `log_exports` and retrieve the previously generated file for download.\r\n\r\nNote: For session and summary log files, the file naming convention has been updated to include a date-range selection, with the dates formatted as `YYYY-MM-DD`.\r\nPrevious naming convention:\r\n- `{facility_name}_{last 4 digits of facility ID}_{log export type}.csv`\r\n\r\nCurrent naming convention:\r\n- `{facility_name}_{last 4 digits of facility ID}_{log export type}_from_{start date}_to_{end date}.csv`\r\n\r\n## Expected behavior\r\nWe should remove any files that do not have a record in the database. One way to achieve this is by scanning the folder `log_export`, which contains all exported log files, using the predictable file naming convention, and cross-referencing them with the file records stored in the database. Any session or summary files that do not have a corresponding record in the database should be deleted.\r\n\r\nWith a 1:1 correlation with the `GenerateCSVLogRequest` record, the database should only contain one exported file for each session and summary log type. A new async task should be created for performing this cleanup, and can be queued whenever a new log export is generated. 
If the task has a predictable id, it can be ensured that there is only one cleanup task going at once.\r\n\r\nMore details on the task system can be found [here](https://kolibri-dev.readthedocs.io/en/develop/backend_architecture/tasks.html).\r\n\r\n## User-facing consequences\r\nEnsures the database is free from redundant files and optimizes the user's database storage.\r\n\r\n## Acceptance Criteria\r\n\r\n- [ ] All previously generated session and summary log file exports that lack a corresponding record in the database have been deleted.\r\n- [ ] The previously generated CSV file should be automatically removed from the database upon generating a new one.\r\n- [ ] New python code should be covered by unit tests.\r\n\n", "before_files": [{"content": "import os\n\nfrom django.core.management import call_command\nfrom rest_framework import serializers\n\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.logger.csv_export import CSV_EXPORT_FILENAMES\nfrom kolibri.core.tasks.decorators import register_task\nfrom kolibri.core.tasks.permissions import IsAdminForJob\nfrom kolibri.core.tasks.validation import JobValidator\nfrom kolibri.utils import conf\n\n\ndef get_filepath(log_type, facility_id, start_date, end_date):\n facility = Facility.objects.get(id=facility_id)\n logs_dir = os.path.join(conf.KOLIBRI_HOME, \"log_export\")\n if not os.path.isdir(logs_dir):\n os.mkdir(logs_dir)\n filepath = os.path.join(\n logs_dir,\n CSV_EXPORT_FILENAMES[log_type].format(\n facility.name, facility.id[:4], start_date[:10], end_date[:10]\n ),\n )\n return filepath\n\n\nclass ExportLogCSVValidator(JobValidator):\n facility = serializers.PrimaryKeyRelatedField(\n queryset=Facility.objects.all(), required=False\n )\n start_date = serializers.CharField()\n end_date = serializers.CharField()\n locale = serializers.CharField(required=False)\n\n def validate(self, data):\n facility = data.get(\"facility\", None)\n start_date = data.get(\"start_date\", None)\n end_date = data.get(\"end_date\", None)\n locale = data.get(\"locale\", None)\n\n if facility is None and \"user\" in self.context:\n facility = self.context[\"user\"].facility\n elif facility is None:\n raise serializers.ValidationError(\n \"Facility must be specified when no user is available.\"\n )\n if not start_date or not end_date:\n raise serializers.ValidationError(\n \"Start {} and End {} date values are required.\".format(\n start_date, end_date\n )\n )\n kwargs = {\n \"facility\": facility.id,\n \"start_date\": start_date,\n \"end_date\": end_date,\n \"locale\": locale,\n }\n return {\n \"facility_id\": facility.id,\n \"kwargs\": kwargs,\n \"args\": [facility.id],\n }\n\n\ndef _exportlogcsv(log_type, facility_id, start_date, end_date, locale):\n filepath = get_filepath(log_type, facility_id, start_date, end_date)\n call_command(\n \"exportlogs\",\n log_type=log_type,\n output_file=filepath,\n facility=facility_id,\n overwrite=True,\n start_date=start_date,\n end_date=end_date,\n locale=locale,\n )\n\n\n@register_task(\n validator=ExportLogCSVValidator,\n track_progress=True,\n permission_classes=[IsAdminForJob],\n)\ndef exportsessionlogcsv(facility_id, **kwargs):\n \"\"\"\n Dumps in csv format the content session logs.\n\n :param: facility.\n \"\"\"\n _exportlogcsv(\n \"session\",\n facility_id,\n kwargs.get(\"start_date\"),\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n )\n\n\n@register_task(\n validator=ExportLogCSVValidator,\n track_progress=True,\n permission_classes=[IsAdminForJob],\n)\ndef 
exportsummarylogcsv(facility_id, **kwargs):\n \"\"\"\n Dumps in csv format the content summary logs.\n\n :param: facility.\n \"\"\"\n _exportlogcsv(\n \"summary\",\n facility_id,\n kwargs.get(\"start_date\"),\n kwargs.get(\"end_date\"),\n kwargs.get(\"locale\"),\n )\n", "path": "kolibri/core/logger/tasks.py"}, {"content": "import logging\nimport ntpath\nimport os\n\nfrom dateutil import parser\nfrom django.conf import settings\nfrom django.core.management.base import CommandError\nfrom django.utils import translation\n\nfrom kolibri.core.auth.constants.commands_errors import FILE_WRITE_ERROR\nfrom kolibri.core.auth.constants.commands_errors import INVALID\nfrom kolibri.core.auth.constants.commands_errors import MESSAGES\nfrom kolibri.core.auth.constants.commands_errors import NO_FACILITY\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.logger.csv_export import classes_info\nfrom kolibri.core.logger.csv_export import csv_file_generator\nfrom kolibri.core.logger.models import GenerateCSVLogRequest\nfrom kolibri.core.tasks.management.commands.base import AsyncCommand\nfrom kolibri.core.tasks.utils import get_current_job\nfrom kolibri.utils.time_utils import local_now\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(AsyncCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"-O\",\n \"--output-file\",\n action=\"store\",\n dest=\"output_file\",\n default=None,\n type=str,\n help=\"The generated file will be saved with this name\",\n )\n parser.add_argument(\n \"-l\",\n \"--log-type\",\n action=\"store\",\n dest=\"log_type\",\n default=\"session\",\n choices=classes_info.keys(),\n help='Log type to be exported. Valid values are \"session\" and \"summary\".',\n )\n parser.add_argument(\n \"-w\",\n \"--overwrite\",\n action=\"store_true\",\n dest=\"overwrite\",\n default=False,\n help=\"Allows overwritten of the exported file in case it exists\",\n )\n parser.add_argument(\n \"--facility\",\n action=\"store\",\n type=str,\n help=\"Facility id to import the users into\",\n )\n parser.add_argument(\n \"--locale\",\n action=\"store\",\n type=str,\n default=None,\n help=\"Code of the language for the messages to be translated\",\n )\n parser.add_argument(\n \"--start_date\",\n action=\"store\",\n dest=\"start_date\",\n type=str,\n help=\"Start date for date range selection of log files. Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS\",\n )\n parser.add_argument(\n \"--end_date\",\n action=\"store\",\n dest=\"end_date\",\n type=str,\n help=\"End date for date range selection of log files. 
Valid value is an ISO string formatted as YYYY-MM-DDTHH:MM:SS\",\n )\n\n def get_facility(self, options):\n if options[\"facility\"]:\n default_facility = Facility.objects.get(pk=options[\"facility\"])\n else:\n default_facility = Facility.get_default_facility()\n\n return default_facility\n\n def validate_date(self, date_str):\n try:\n return bool(parser.parse(date_str))\n except ValueError:\n return False\n\n def handle_async(self, *args, **options):\n\n # set language for the translation of the messages\n locale = settings.LANGUAGE_CODE if not options[\"locale\"] else options[\"locale\"]\n translation.activate(locale)\n self.overall_error = \"\"\n job = get_current_job()\n\n start_date = options[\"start_date\"]\n end_date = options[\"end_date\"]\n\n facility = self.get_facility(options)\n if not facility:\n self.overall_error = str(MESSAGES[NO_FACILITY])\n\n elif not self.validate_date(start_date):\n self.overall_error = str(MESSAGES[INVALID]).format(\"start_date\")\n\n elif not self.validate_date(end_date):\n self.overall_error = str(MESSAGES[INVALID]).format(\"end_date\")\n\n else:\n log_type = options[\"log_type\"]\n\n log_info = classes_info[log_type]\n\n if options[\"output_file\"] is None:\n filename = log_info[\"filename\"].format(\n facility.name, facility.id[:4], start_date[:10], end_date[:10]\n )\n else:\n filename = options[\"output_file\"]\n\n filepath = os.path.join(os.getcwd(), filename)\n\n queryset = log_info[\"queryset\"]\n\n total_rows = queryset.count()\n\n with self.start_progress(total=total_rows) as progress_update:\n try:\n for row in csv_file_generator(\n facility,\n log_type,\n filepath,\n start_date=start_date,\n end_date=end_date,\n overwrite=options[\"overwrite\"],\n ):\n progress_update(1)\n except (ValueError, IOError) as e:\n self.overall_error = str(MESSAGES[FILE_WRITE_ERROR].format(e))\n\n if job:\n job.extra_metadata[\"overall_error\"] = self.overall_error\n self.job.extra_metadata[\"filename\"] = ntpath.basename(filepath)\n job.save_meta()\n else:\n if self.overall_error:\n raise CommandError(self.overall_error)\n else:\n logger.info(\n \"Created csv file {} with {} lines\".format(filepath, total_rows)\n )\n\n translation.deactivate()\n\n # create or update record of log request\n GenerateCSVLogRequest.objects.update_or_create(\n log_type=log_type,\n facility=facility,\n defaults={\n \"selected_start_date\": parser.parse(start_date),\n \"selected_end_date\": parser.parse(end_date),\n \"date_requested\": local_now(),\n },\n )\n", "path": "kolibri/core/logger/management/commands/exportlogs.py"}]}
| 3,647 | 974 |
gh_patches_debug_34281
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-830
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
zh_CN (and possibly other providers) return invalid results for domain_name
Brief summary of the issue goes here.
### Steps to reproduce
f = Faker('zh_CN')
f.domain_name()
>he.cn
### Expected behavior
it should return valid domain names
### Actual behavior
`he.cn` is not a valid domain. `he` is in the list of second-level domains (https://en.wikipedia.org/wiki/.cn#Second-level_domains_of_provinces) and will cause failures in places where domains are being validated.
it's unclear where this change should/could be made after briefly reviewing the source code.
</issue>
<code>
[start of faker/providers/internet/zh_CN/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as InternetProvider
4 from faker.utils.decorators import slugify
5
6
7 class Provider(InternetProvider):
8 user_name_formats = (
9 '{{last_romanized_name}}.{{first_romanized_name}}',
10 '{{first_romanized_name}}.{{last_romanized_name}}',
11 '{{first_romanized_name}}##',
12 '?{{last_romanized_name}}',
13 )
14 tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')
15
16 @slugify
17 def domain_word(self):
18 return self.generator.format('last_romanized_name')
19
[end of faker/providers/internet/zh_CN/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/providers/internet/zh_CN/__init__.py b/faker/providers/internet/zh_CN/__init__.py
--- a/faker/providers/internet/zh_CN/__init__.py
+++ b/faker/providers/internet/zh_CN/__init__.py
@@ -1,5 +1,6 @@
# coding=utf-8
from __future__ import unicode_literals
+from collections import OrderedDict
from .. import Provider as InternetProvider
from faker.utils.decorators import slugify
@@ -11,8 +12,54 @@
'{{first_romanized_name}}##',
'?{{last_romanized_name}}',
)
- tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')
+
+ tlds = OrderedDict((
+ ('cn', 0.8),
+ ('net', 0.1),
+ ('com', 0.05),
+ ('org', 0.05),
+ ))
+
+ second_level_domains = ('ac', 'com', 'edu', 'gov', 'mil', 'net', 'org',
+ 'ah', 'bj', 'cq', 'fj', 'gd', 'gs', 'gz', 'gx',
+ 'ha', 'hb', 'he', 'hi', 'hk', 'hl', 'hn', 'jl',
+ 'js', 'jx', 'ln', 'mo', 'nm', 'nx', 'qh', 'sc',
+ 'sd', 'sh', 'sn', 'sx', 'tj', 'xj', 'xz', 'yn', 'zj')
+
+ domain_formats = (
+ '##', '??',
+ '{{first_romanized_name}}',
+ '{{last_romanized_name}}',
+ '{{first_romanized_name}}{{last_romanized_name}}',
+ '{{last_romanized_name}}{{last_romanized_name}}',
+ '{{first_romanized_name}}{{first_romanized_name}}',
+ )
@slugify
def domain_word(self):
- return self.generator.format('last_romanized_name')
+ pattern = self.random_element(self.domain_formats)
+ if '#' in pattern or '?' in pattern:
+ return self.bothify(pattern)
+ else:
+ return self.generator.parse(pattern)
+
+ def domain_name(self, levels=1):
+ if levels < 1:
+ raise ValueError("levels must be greater than or equal to 1")
+ if levels == 1:
+ domain_word = self.domain_word()
+ # Avoids he.cn as seen in issue #687
+ while domain_word in self.second_level_domains:
+ domain_word = self.domain_word()
+ return domain_word + '.' + self.tld()
+ elif levels == 2:
+ my_tld = self.tld()
+ my_second_level = ''
+ if my_tld == 'cn':
+ my_second_level = self.random_element(self.second_level_domains)
+ else:
+ my_second_level = self.domain_word()
+ return self.domain_word() + '.' + my_second_level + '.' + my_tld
+ else:
+ return self.domain_word() + '.' + self.domain_name(levels - 1)
+
|
{"golden_diff": "diff --git a/faker/providers/internet/zh_CN/__init__.py b/faker/providers/internet/zh_CN/__init__.py\n--- a/faker/providers/internet/zh_CN/__init__.py\n+++ b/faker/providers/internet/zh_CN/__init__.py\n@@ -1,5 +1,6 @@\n # coding=utf-8\n from __future__ import unicode_literals\n+from collections import OrderedDict\n from .. import Provider as InternetProvider\n from faker.utils.decorators import slugify\n \n@@ -11,8 +12,54 @@\n '{{first_romanized_name}}##',\n '?{{last_romanized_name}}',\n )\n- tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')\n+\n+ tlds = OrderedDict((\n+ ('cn', 0.8),\n+ ('net', 0.1),\n+ ('com', 0.05),\n+ ('org', 0.05),\n+ ))\n+\n+ second_level_domains = ('ac', 'com', 'edu', 'gov', 'mil', 'net', 'org',\n+ 'ah', 'bj', 'cq', 'fj', 'gd', 'gs', 'gz', 'gx',\n+ 'ha', 'hb', 'he', 'hi', 'hk', 'hl', 'hn', 'jl',\n+ 'js', 'jx', 'ln', 'mo', 'nm', 'nx', 'qh', 'sc',\n+ 'sd', 'sh', 'sn', 'sx', 'tj', 'xj', 'xz', 'yn', 'zj')\n+\n+ domain_formats = (\n+ '##', '??',\n+ '{{first_romanized_name}}',\n+ '{{last_romanized_name}}',\n+ '{{first_romanized_name}}{{last_romanized_name}}',\n+ '{{last_romanized_name}}{{last_romanized_name}}',\n+ '{{first_romanized_name}}{{first_romanized_name}}',\n+ )\n \n @slugify\n def domain_word(self):\n- return self.generator.format('last_romanized_name')\n+ pattern = self.random_element(self.domain_formats)\n+ if '#' in pattern or '?' in pattern:\n+ return self.bothify(pattern)\n+ else:\n+ return self.generator.parse(pattern)\n+\n+ def domain_name(self, levels=1):\n+ if levels < 1:\n+ raise ValueError(\"levels must be greater than or equal to 1\")\n+ if levels == 1:\n+ domain_word = self.domain_word()\n+ # Avoids he.cn as seen in issue #687\n+ while domain_word in self.second_level_domains:\n+ domain_word = self.domain_word()\n+ return domain_word + '.' + self.tld()\n+ elif levels == 2:\n+ my_tld = self.tld()\n+ my_second_level = ''\n+ if my_tld == 'cn':\n+ my_second_level = self.random_element(self.second_level_domains)\n+ else:\n+ my_second_level = self.domain_word()\n+ return self.domain_word() + '.' + my_second_level + '.' + my_tld\n+ else:\n+ return self.domain_word() + '.' + self.domain_name(levels - 1)\n+\n", "issue": "zh_CN (and possible other providers) return invalid results for domain_name\nBrief summary of the issue goes here.\r\n\r\n### Steps to reproduce\r\n\r\nf = Faker('zh_CN')\r\nf.domain_name()\r\n>he.cn\r\n\r\n### Expected behavior\r\n\r\nit should return valid domain names\r\n\r\n### Actual behavior\r\n\r\nhe.cn is not a valid domain. he is in the list of second-level domains (https://en.wikipedia.org/wiki/.cn#Second-level_domains_of_provinces) and will cause failures in places where domains are being validated.\r\n\r\nit's unclear where this change should/could be made after briefly reviewing the source code.\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\nfrom faker.utils.decorators import slugify\n\n\nclass Provider(InternetProvider):\n user_name_formats = (\n '{{last_romanized_name}}.{{first_romanized_name}}',\n '{{first_romanized_name}}.{{last_romanized_name}}',\n '{{first_romanized_name}}##',\n '?{{last_romanized_name}}',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'cn', 'cn', 'cn')\n\n @slugify\n def domain_word(self):\n return self.generator.format('last_romanized_name')\n", "path": "faker/providers/internet/zh_CN/__init__.py"}]}
| 852 | 724 |
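To illustrate the approach taken in the golden diff above — re-draw the domain word whenever it collides with a reserved `.cn` second-level label, and pick the TLD by weight — here is a minimal stand-alone sketch. The TLD weights match the patch, but the word list and the trimmed reserved-label set are illustrative stand-ins, not Faker's real data (the real provider draws romanized Chinese surnames and carries the full reserved list).

```python
import random

# Illustrative stand-ins; the real provider keeps the complete list of
# reserved .cn second-level labels and generates romanized name words.
RESERVED_SECOND_LEVEL = {"ac", "com", "edu", "gov", "net", "org", "he", "bj", "sh"}
TLD_WEIGHTS = {"cn": 0.8, "net": 0.1, "com": 0.05, "org": 0.05}
WORDS = ["zhang", "wang", "li", "he", "liu", "chen"]

def domain_name():
    word = random.choice(WORDS)
    # Re-draw while the word collides with a reserved second-level label,
    # so a result such as "he.cn" can never be produced.
    while word in RESERVED_SECOND_LEVEL:
        word = random.choice(WORDS)
    tld = random.choices(list(TLD_WEIGHTS), weights=list(TLD_WEIGHTS.values()))[0]
    return f"{word}.{tld}"

print(domain_name())  # e.g. "zhang.cn", never "he.cn"
```

In the actual patch the weighted TLDs are expressed as an `OrderedDict`, which Faker's element-selection machinery treats as value-to-weight pairs rather than a uniform tuple.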
gh_patches_debug_20196
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-8465
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failing to load sqlalchemy-plugin in pyinstaller'ed exe
This line of code works perfectly fine while running as a file from spyder.
engine = sqlalchemy.create_engine('teradata://uid:pwd@DBinstance?driver=Teradata')
However, after making an exe file using pyinstaller, I get the following error while running the exe:
sqlalchemy.exc.NoSuchModuleError: Can't load plugin:sqlalchemy.dialects:teradata
This looks to be an exe-creation-issue in loading libraries. Has anyone encountered a similar issue and was able to resolve? Any pointers would be appreciated.
</issue>
<code>
[start of PyInstaller/hooks/hook-sqlalchemy.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2023, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import re
13 import importlib.util
14
15 from PyInstaller import isolated
16 from PyInstaller.lib.modulegraph.modulegraph import SourceModule
17 from PyInstaller.utils.hooks import check_requirement, logger
18
19 # 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
20 excludedimports = ['sqlalchemy.testing']
21
22 # Include most common database bindings some database bindings are detected and include some are not. We should
23 # explicitly include database backends.
24 hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']
25
26 if check_requirement('sqlalchemy >= 1.4'):
27 hiddenimports.append("sqlalchemy.sql.default_comparator")
28
29
30 @isolated.decorate
31 def _get_dialect_modules(module_name):
32 import importlib
33 module = importlib.import_module(module_name)
34 return [f"{module_name}.{submodule_name}" for submodule_name in module.__all__]
35
36
37 # In SQLAlchemy >= 0.6, the "sqlalchemy.dialects" package provides dialects.
38 # In SQLAlchemy <= 0.5, the "sqlalchemy.databases" package provides dialects.
39 if check_requirement('sqlalchemy >= 0.6'):
40 hiddenimports += _get_dialect_modules("sqlalchemy.dialects")
41 else:
42 hiddenimports += _get_dialect_modules("sqlalchemy.databases")
43
44
45 def hook(hook_api):
46 """
47 SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This decorator does imports. E.g.:
48
49 @util.dependencies("sqlalchemy.sql.schema")
50
51 This hook scans for included SQLAlchemy modules and then scans those modules for any util.dependencies and marks
52 those modules as hidden imports.
53 """
54
55 if not check_requirement('sqlalchemy >= 0.9'):
56 return
57
58 # this parser is very simplistic but seems to catch all cases as of V1.1
59 depend_regex = re.compile(r'@util.dependencies\([\'"](.*?)[\'"]\)')
60
61 hidden_imports_set = set()
62 known_imports = set()
63 for node in hook_api.module_graph.iter_graph(start=hook_api.module):
64 if isinstance(node, SourceModule) and node.identifier.startswith('sqlalchemy.'):
65 known_imports.add(node.identifier)
66
67 # Read the source...
68 with open(node.filename, 'rb') as f:
69 source_code = f.read()
70 source_code = importlib.util.decode_source(source_code)
71
72 # ... and scan it
73 for match in depend_regex.findall(source_code):
74 hidden_imports_set.add(match)
75
76 hidden_imports_set -= known_imports
77 if len(hidden_imports_set):
78 logger.info(" Found %d sqlalchemy hidden imports", len(hidden_imports_set))
79 hook_api.add_imports(*list(hidden_imports_set))
80
[end of PyInstaller/hooks/hook-sqlalchemy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py
--- a/PyInstaller/hooks/hook-sqlalchemy.py
+++ b/PyInstaller/hooks/hook-sqlalchemy.py
@@ -14,7 +14,9 @@
from PyInstaller import isolated
from PyInstaller.lib.modulegraph.modulegraph import SourceModule
-from PyInstaller.utils.hooks import check_requirement, logger
+from PyInstaller.utils.hooks import check_requirement, collect_entry_point, logger
+
+datas = []
# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.
excludedimports = ['sqlalchemy.testing']
@@ -41,6 +43,13 @@
else:
hiddenimports += _get_dialect_modules("sqlalchemy.databases")
+# Collect additional dialects and plugins that are registered via entry-points, under assumption that they are available
+# in the build environment for a reason (i.e., they are used).
+for entry_point_name in ('sqlalchemy.dialects', 'sqlalchemy.plugins'):
+ ep_datas, ep_hiddenimports = collect_entry_point(entry_point_name)
+ datas += ep_datas
+ hiddenimports += ep_hiddenimports
+
def hook(hook_api):
"""
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-sqlalchemy.py b/PyInstaller/hooks/hook-sqlalchemy.py\n--- a/PyInstaller/hooks/hook-sqlalchemy.py\n+++ b/PyInstaller/hooks/hook-sqlalchemy.py\n@@ -14,7 +14,9 @@\n \n from PyInstaller import isolated\n from PyInstaller.lib.modulegraph.modulegraph import SourceModule\n-from PyInstaller.utils.hooks import check_requirement, logger\n+from PyInstaller.utils.hooks import check_requirement, collect_entry_point, logger\n+\n+datas = []\n \n # 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\n excludedimports = ['sqlalchemy.testing']\n@@ -41,6 +43,13 @@\n else:\n hiddenimports += _get_dialect_modules(\"sqlalchemy.databases\")\n \n+# Collect additional dialects and plugins that are registered via entry-points, under assumption that they are available\n+# in the build environment for a reason (i.e., they are used).\n+for entry_point_name in ('sqlalchemy.dialects', 'sqlalchemy.plugins'):\n+ ep_datas, ep_hiddenimports = collect_entry_point(entry_point_name)\n+ datas += ep_datas\n+ hiddenimports += ep_hiddenimports\n+\n \n def hook(hook_api):\n \"\"\"\n", "issue": "Failing to load sqlalchemy-plugin in pyinstaller'ed exe\nThis line of code works perfectly fine while running as a file from spyder.\r\n\r\nengine = sqlalchemy.create_engine('teradata://uid:pwd@DBinstance?driver=Teradata')\r\nHowever, after making an exe file using pyinstaller, I get the following error while running the exe:\r\n\r\nsqlalchemy.exc.NoSuchModuleError: Can't load plugin:sqlalchemy.dialects:teradata\r\n\r\nThis looks to be an exe-creation-issue in loading libraries. Has anyone encountered a similar issue and was able to resolve? Any pointers would be appreciated.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport re\nimport importlib.util\n\nfrom PyInstaller import isolated\nfrom PyInstaller.lib.modulegraph.modulegraph import SourceModule\nfrom PyInstaller.utils.hooks import check_requirement, logger\n\n# 'sqlalchemy.testing' causes bundling a lot of unnecessary modules.\nexcludedimports = ['sqlalchemy.testing']\n\n# Include most common database bindings some database bindings are detected and include some are not. 
We should\n# explicitly include database backends.\nhiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'sqlalchemy.ext.baked']\n\nif check_requirement('sqlalchemy >= 1.4'):\n hiddenimports.append(\"sqlalchemy.sql.default_comparator\")\n\n\[email protected]\ndef _get_dialect_modules(module_name):\n import importlib\n module = importlib.import_module(module_name)\n return [f\"{module_name}.{submodule_name}\" for submodule_name in module.__all__]\n\n\n# In SQLAlchemy >= 0.6, the \"sqlalchemy.dialects\" package provides dialects.\n# In SQLAlchemy <= 0.5, the \"sqlalchemy.databases\" package provides dialects.\nif check_requirement('sqlalchemy >= 0.6'):\n hiddenimports += _get_dialect_modules(\"sqlalchemy.dialects\")\nelse:\n hiddenimports += _get_dialect_modules(\"sqlalchemy.databases\")\n\n\ndef hook(hook_api):\n \"\"\"\n SQLAlchemy 0.9 introduced the decorator 'util.dependencies'. This decorator does imports. E.g.:\n\n @util.dependencies(\"sqlalchemy.sql.schema\")\n\n This hook scans for included SQLAlchemy modules and then scans those modules for any util.dependencies and marks\n those modules as hidden imports.\n \"\"\"\n\n if not check_requirement('sqlalchemy >= 0.9'):\n return\n\n # this parser is very simplistic but seems to catch all cases as of V1.1\n depend_regex = re.compile(r'@util.dependencies\\([\\'\"](.*?)[\\'\"]\\)')\n\n hidden_imports_set = set()\n known_imports = set()\n for node in hook_api.module_graph.iter_graph(start=hook_api.module):\n if isinstance(node, SourceModule) and node.identifier.startswith('sqlalchemy.'):\n known_imports.add(node.identifier)\n\n # Read the source...\n with open(node.filename, 'rb') as f:\n source_code = f.read()\n source_code = importlib.util.decode_source(source_code)\n\n # ... and scan it\n for match in depend_regex.findall(source_code):\n hidden_imports_set.add(match)\n\n hidden_imports_set -= known_imports\n if len(hidden_imports_set):\n logger.info(\" Found %d sqlalchemy hidden imports\", len(hidden_imports_set))\n hook_api.add_imports(*list(hidden_imports_set))\n", "path": "PyInstaller/hooks/hook-sqlalchemy.py"}]}
| 1,505 | 271 |
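The fix above works by collecting everything registered under the `sqlalchemy.dialects` and `sqlalchemy.plugins` entry-point groups, so third-party dialects such as the Teradata one end up bundled. As a rough sketch of what that discovery step amounts to — independent of PyInstaller's `collect_entry_point` helper — the module paths can be read straight from package metadata. `dialect_modules` is a hypothetical name, and the fallback branch for pre-3.10 Python is an assumption about the environment, not part of the patch.

```python
from importlib.metadata import entry_points

def dialect_modules(group="sqlalchemy.dialects"):
    """Return the module paths behind every entry point in the given group."""
    try:
        eps = entry_points(group=group)          # Python 3.10+ selection API
    except TypeError:
        eps = entry_points().get(group, [])      # older importlib.metadata mapping
    # An entry point value looks like "package.module:attr"; the module part
    # is what a bundler has to treat as a hidden import.
    return sorted({ep.value.split(":")[0] for ep in eps})

if __name__ == "__main__":
    print(dialect_modules())                     # third-party dialects appear here
    print(dialect_modules("sqlalchemy.plugins"))
```

The design point is that anything importable only via an entry point is invisible to static import analysis, which is why the hook has to enumerate the groups explicitly.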
gh_patches_debug_9333
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-6638
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Intermittent duplicate key errors when creating default sysadmin
**CKAN version**
CKAN 2.9.x. Does not appear to affect 2.8.
**Describe the bug**
When our continuous integration tests initialise a test CKAN 2.9 instance, they sometimes encounter a duplicate key exception when CKAN is attempting to create the 'default' sysadmin account. There does not appear to be any specific pattern to when the errors occur; re-running the build may resolve it, or not. This did not occur on CKAN 2.8.
**Steps to reproduce**
Our test scripts run the following CLI commands:
ckan -c /etc/ckan/default/production.ini db clean --yes
ckan -c /etc/ckan/default/production.ini db init
ckan -c /etc/ckan/default/production.ini comments initdb
ckan -c /etc/ckan/default/production.ini comments updatedb
ckan -c /etc/ckan/default/production.ini comments init_notifications_db
**Additional details**
Sample failed build:
https://github.com/qld-gov-au/ckanext-ytp-comments/runs/4480610398?check_suite_focus=true
(Search for "already exists" to locate the stack trace)
NB Although the error actually occurred on that run during a `ckanext-ytp-comments` command, it was not specific to the extension, as shown by an excerpt from the stack trace:
ckan_1 | File "/app/ckan/default/src/ckan/ckan/cli/cli.py", line 102, in _init_ckan_config
ckan_1 | ctx.obj = CkanCommand(value)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/cli/cli.py", line 52, in __init__
ckan_1 | self.app = make_app(self.config)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/config/middleware/__init__.py", line 56, in make_app
ckan_1 | load_environment(conf)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/config/environment.py", line 123, in load_environment
ckan_1 | p.load_all()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 161, in load_all
ckan_1 | unload_all()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 208, in unload_all
ckan_1 | unload(*reversed(_PLUGINS))
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 236, in unload
ckan_1 | plugins_update()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/plugins/core.py", line 153, in plugins_update
ckan_1 | environment.update_config()
ckan_1 | File "/app/ckan/default/src/ckan/ckan/config/environment.py", line 322, in update_config
ckan_1 | logic.get_action('get_site_user')({'ignore_auth': True}, None)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/logic/__init__.py", line 477, in wrapped
ckan_1 | result = _action(context, data_dict, **kw)
ckan_1 | File "/app/ckan/default/src/ckan/ckan/logic/action/get.py", line 2398, in get_site_user
ckan_1 | model.Session.flush()
</issue>
<code>
[start of ckan/config/environment.py]
1 # encoding: utf-8
2
3 '''CKAN environment configuration'''
4 import os
5 import logging
6 import warnings
7 import pytz
8
9 import sqlalchemy
10
11 import ckan.model as model
12 import ckan.plugins as p
13 import ckan.lib.plugins as lib_plugins
14 import ckan.lib.helpers as helpers
15 import ckan.lib.app_globals as app_globals
16 from ckan.lib.redis import is_redis_available
17 import ckan.lib.search as search
18 import ckan.logic as logic
19 import ckan.authz as authz
20 from ckan.lib.webassets_tools import webassets_init
21 from ckan.lib.i18n import build_js_translations
22
23 from ckan.common import config, config_declaration
24 from ckan.exceptions import CkanConfigurationException
25 log = logging.getLogger(__name__)
26
27 # Suppress benign warning 'Unbuilt egg for setuptools'
28 warnings.simplefilter('ignore', UserWarning)
29
30
31 def load_environment(conf):
32 """
33 Configure the Pylons environment via the ``pylons.config`` object. This
34 code should only need to be run once.
35 """
36 os.environ['CKAN_CONFIG'] = conf['__file__']
37
38 valid_base_public_folder_names = ['public']
39 static_files = conf.get('ckan.base_public_folder', 'public')
40 conf['ckan.base_public_folder'] = static_files
41
42 if static_files not in valid_base_public_folder_names:
43 raise CkanConfigurationException(
44 'You provided an invalid value for ckan.base_public_folder. '
45 'Possible values are: "public".'
46 )
47
48 log.info('Loading static files from %s' % static_files)
49
50 # Initialize main CKAN config object
51 config.update(conf)
52
53 # Setup the SQLAlchemy database engine
54 # Suppress a couple of sqlalchemy warnings
55 msgs = ['^Unicode type received non-unicode bind param value',
56 "^Did not recognize type 'BIGINT' of column 'size'",
57 "^Did not recognize type 'tsvector' of column 'search_vector'"
58 ]
59 for msg in msgs:
60 warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)
61
62 # load all CKAN plugins
63 p.load_all()
64
65 # Check Redis availability
66 if not is_redis_available():
67 log.critical('Could not connect to Redis.')
68
69 app_globals.reset()
70
71 # issue #3260: remove idle transaction
72 # Session that was used for getting all config params nor committed,
73 # neither removed and we have idle connection as result
74 model.Session.commit()
75
76 # Build JavaScript translations. Must be done after plugins have
77 # been loaded.
78 build_js_translations()
79
80
81 # A mapping of config settings that can be overridden by env vars.
82 # Note: Do not remove the following lines, they are used in the docs
83 # Start CONFIG_FROM_ENV_VARS
84 CONFIG_FROM_ENV_VARS = {
85 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',
86 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',
87 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',
88 'ckan.redis.url': 'CKAN_REDIS_URL',
89 'solr_url': 'CKAN_SOLR_URL',
90 'solr_user': 'CKAN_SOLR_USER',
91 'solr_password': 'CKAN_SOLR_PASSWORD',
92 'ckan.site_id': 'CKAN_SITE_ID',
93 'ckan.site_url': 'CKAN_SITE_URL',
94 'ckan.storage_path': 'CKAN_STORAGE_PATH',
95 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',
96 'smtp.server': 'CKAN_SMTP_SERVER',
97 'smtp.starttls': 'CKAN_SMTP_STARTTLS',
98 'smtp.user': 'CKAN_SMTP_USER',
99 'smtp.password': 'CKAN_SMTP_PASSWORD',
100 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',
101 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'
102 }
103 # End CONFIG_FROM_ENV_VARS
104
105
106 def update_config():
107 ''' This code needs to be run when the config is changed to take those
108 changes into account. It is called whenever a plugin is loaded as the
109 plugin might have changed the config values (for instance it might
110 change ckan.site_url) '''
111
112 config_declaration.setup()
113 config_declaration.make_safe(config)
114 config_declaration.normalize(config)
115
116 webassets_init()
117
118 for plugin in p.PluginImplementations(p.IConfigurer):
119 # must do update in place as this does not work:
120 # config = plugin.update_config(config)
121 plugin.update_config(config)
122
123 # Set whitelisted env vars on config object
124 # This is set up before globals are initialized
125
126 ckan_db = os.environ.get('CKAN_DB', None)
127 if ckan_db:
128 msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
129 ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
130 log.warn(msg)
131 config['sqlalchemy.url'] = ckan_db
132
133 for option in CONFIG_FROM_ENV_VARS:
134 from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
135 if from_env:
136 config[option] = from_env
137
138 if config.get_value("config.mode") == "strict":
139 _, errors = config_declaration.validate(config)
140 if errors:
141 msg = "\n".join(
142 "{}: {}".format(key, "; ".join(issues))
143 for key, issues in errors.items()
144 )
145 raise CkanConfigurationException(msg)
146
147 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
148
149 site_url = config.get_value('ckan.site_url')
150 if not site_url:
151 raise RuntimeError(
152 'ckan.site_url is not configured and it must have a value.'
153 ' Please amend your .ini file.')
154 if not site_url.lower().startswith('http'):
155 raise RuntimeError(
156 'ckan.site_url should be a full URL, including the schema '
157 '(http or https)')
158 # Remove backslash from site_url if present
159 config['ckan.site_url'] = site_url.rstrip('/')
160
161 display_timezone = config.get_value('ckan.display_timezone')
162 if (display_timezone and
163 display_timezone != 'server' and
164 display_timezone not in pytz.all_timezones):
165 raise CkanConfigurationException(
166 "ckan.display_timezone is not 'server' or a valid timezone"
167 )
168
169 # Init SOLR settings and check if the schema is compatible
170 # from ckan.lib.search import SolrSettings, check_solr_schema_version
171
172 # lib.search is imported here as we need the config enabled and parsed
173 search.SolrSettings.init(config.get_value('solr_url'),
174 config.get_value('solr_user'),
175 config.get_value('solr_password'))
176 search.check_solr_schema_version()
177
178 lib_plugins.reset_package_plugins()
179 lib_plugins.register_package_plugins()
180 lib_plugins.reset_group_plugins()
181 lib_plugins.register_group_plugins()
182
183 # initialise the globals
184 app_globals.app_globals._init()
185
186 helpers.load_plugin_helpers()
187
188 # Templates and CSS loading from configuration
189 valid_base_templates_folder_names = ['templates']
190 templates = config.get_value('ckan.base_templates_folder')
191 config['ckan.base_templates_folder'] = templates
192
193 if templates not in valid_base_templates_folder_names:
194 raise CkanConfigurationException(
195 'You provided an invalid value for ckan.base_templates_folder. '
196 'Possible values are: "templates".'
197 )
198
199 jinja2_templates_path = os.path.join(root, templates)
200 log.info('Loading templates from %s' % jinja2_templates_path)
201 template_paths = [jinja2_templates_path]
202
203 extra_template_paths = config.get_value('extra_template_paths')
204 if extra_template_paths:
205 # must be first for them to override defaults
206 template_paths = extra_template_paths.split(',') + template_paths
207 config['computed_template_paths'] = template_paths
208
209 # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)
210 # to eliminate database errors due to stale pooled connections
211 config.setdefault('sqlalchemy.pool_pre_ping', True)
212 # Initialize SQLAlchemy
213 engine = sqlalchemy.engine_from_config(config)
214 model.init_model(engine)
215
216 for plugin in p.PluginImplementations(p.IConfigurable):
217 plugin.configure(config)
218
219 # clear other caches
220 logic.clear_actions_cache()
221 logic.clear_validators_cache()
222 authz.clear_auth_functions_cache()
223
224 # Here we create the site user if they are not already in the database
225 try:
226 logic.get_action('get_site_user')({'ignore_auth': True}, None)
227 except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
228 # The database is not yet initialised. It happens in `ckan db init`
229 pass
230
231 # Close current session and open database connections to ensure a clean
232 # clean environment even if an error occurs later on
233 model.Session.remove()
234 model.Session.bind.dispose()
235
[end of ckan/config/environment.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckan/config/environment.py b/ckan/config/environment.py
--- a/ckan/config/environment.py
+++ b/ckan/config/environment.py
@@ -227,6 +227,9 @@
except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
# The database is not yet initialised. It happens in `ckan db init`
pass
+ except sqlalchemy.exc.IntegrityError:
+ # Race condition, user already exists.
+ pass
# Close current session and open database connections to ensure a clean
# clean environment even if an error occurs later on
|
{"golden_diff": "diff --git a/ckan/config/environment.py b/ckan/config/environment.py\n--- a/ckan/config/environment.py\n+++ b/ckan/config/environment.py\n@@ -227,6 +227,9 @@\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # The database is not yet initialised. It happens in `ckan db init`\n pass\n+ except sqlalchemy.exc.IntegrityError:\n+ # Race condition, user already exists.\n+ pass\n \n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n", "issue": "Intermittent duplicate key errors when creating default sysadmin\n**CKAN version**\r\n\r\nCKAN 2.9.x. Does not appear to affect 2.8.\r\n\r\n**Describe the bug**\r\n\r\nWhen our continuous integration tests initialise a test CKAN 2.9 instance, they sometimes encounter a duplicate key exception when CKAN is attempting to create the 'default' sysadmin account. There does not appear to be any specific pattern to when the errors occur; re-running the build may resolve it, or not. This did not occur on CKAN 2.8.\r\n\r\n**Steps to reproduce**\r\n\r\nOur test scripts run the following CLI commands:\r\n\r\nckan -c /etc/ckan/default/production.ini db clean --yes\r\nckan -c /etc/ckan/default/production.ini db init\r\nckan -c /etc/ckan/default/production.ini comments initdb\r\nckan -c /etc/ckan/default/production.ini comments updatedb\r\nckan -c /etc/ckan/default/production.ini comments init_notifications_db\r\n\r\n**Additional details**\r\nSample failed build:\r\n\r\nhttps://github.com/qld-gov-au/ckanext-ytp-comments/runs/4480610398?check_suite_focus=true\r\n\r\n(Search for \"already exists\" to locate the stack trace)\r\n\r\nNB Although the error actually occurred on that run during a `ckanext-ytp-comments` command, it was not specific to the extension, as shown by an excerpt from the stack trace:\r\n\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/cli/cli.py\", line 102, in _init_ckan_config\r\n ckan_1 | ctx.obj = CkanCommand(value)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/cli/cli.py\", line 52, in __init__\r\n ckan_1 | self.app = make_app(self.config)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/config/middleware/__init__.py\", line 56, in make_app\r\n ckan_1 | load_environment(conf)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/config/environment.py\", line 123, in load_environment\r\n ckan_1 | p.load_all()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 161, in load_all\r\n ckan_1 | unload_all()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 208, in unload_all\r\n ckan_1 | unload(*reversed(_PLUGINS))\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 236, in unload\r\n ckan_1 | plugins_update()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/plugins/core.py\", line 153, in plugins_update\r\n ckan_1 | environment.update_config()\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/config/environment.py\", line 322, in update_config\r\n ckan_1 | logic.get_action('get_site_user')({'ignore_auth': True}, None)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/logic/__init__.py\", line 477, in wrapped\r\n ckan_1 | result = _action(context, data_dict, **kw)\r\n ckan_1 | File \"/app/ckan/default/src/ckan/ckan/logic/action/get.py\", line 2398, in get_site_user\r\n ckan_1 | model.Session.flush()\n", "before_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nimport 
pytz\n\nimport sqlalchemy\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.plugins as lib_plugins\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nfrom ckan.lib.webassets_tools import webassets_init\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import config, config_declaration\nfrom ckan.exceptions import CkanConfigurationException\nlog = logging.getLogger(__name__)\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n os.environ['CKAN_CONFIG'] = conf['__file__']\n\n valid_base_public_folder_names = ['public']\n static_files = conf.get('ckan.base_public_folder', 'public')\n conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n\n # Initialize main CKAN config object\n config.update(conf)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM',\n 'ckan.max_resource_size': 'CKAN_MAX_UPLOAD_SIZE_MB'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. 
It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n config_declaration.setup()\n config_declaration.make_safe(config)\n config_declaration.normalize(config)\n\n webassets_init()\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n if config.get_value(\"config.mode\") == \"strict\":\n _, errors = config_declaration.validate(config)\n if errors:\n msg = \"\\n\".join(\n \"{}: {}\".format(key, \"; \".join(issues))\n for key, issues in errors.items()\n )\n raise CkanConfigurationException(msg)\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get_value('ckan.site_url')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n # Remove backslash from site_url if present\n config['ckan.site_url'] = site_url.rstrip('/')\n\n display_timezone = config.get_value('ckan.display_timezone')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get_value('solr_url'),\n config.get_value('solr_user'),\n config.get_value('solr_password'))\n search.check_solr_schema_version()\n\n lib_plugins.reset_package_plugins()\n lib_plugins.register_package_plugins()\n lib_plugins.reset_group_plugins()\n lib_plugins.register_group_plugins()\n\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates']\n templates = config.get_value('ckan.base_templates_folder')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get_value('extra_template_paths')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Enable pessimistic disconnect handling (added in SQLAlchemy 1.2)\n # to eliminate database errors due to stale pooled connections\n config.setdefault('sqlalchemy.pool_pre_ping', True)\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config)\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # The database is not yet initialised. It happens in `ckan db init`\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}]}
| 3,933 | 137 |
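The one-line fix above treats a unique-constraint violation during site-user creation as "another process won the race" and moves on. A generic version of that pattern is sketched below; `ensure_site_user` is a hypothetical wrapper for illustration, not part of CKAN's API, and it only assumes a callable that behaves like CKAN's `get_site_user` action.

```python
import sqlalchemy.exc

def ensure_site_user(get_site_user, context):
    """Best-effort creation of the default site user (hypothetical helper)."""
    try:
        return get_site_user(context, None)
    except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
        # Tables do not exist yet; `ckan db init` has not run.
        return None
    except sqlalchemy.exc.IntegrityError:
        # Duplicate key: a concurrent worker inserted the user first.
        # The record exists now, so a later lookup will succeed.
        return None
```

An alternative design would be to retry the call once after catching `IntegrityError`, returning the already-created user instead of `None`; the merged patch keeps the simpler "swallow and continue" behaviour because the user is fetched again later in startup anyway.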
gh_patches_debug_25836
|
rasdani/github-patches
|
git_diff
|
redis__redis-py-1735
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for FT.AGGREGATE with LOAD *
redis-py should support recent changes to RediSearch. For reference, see the following: RediSearch/RediSearch#2301
</issue>
<code>
[start of redis/commands/search/aggregation.py]
1 FIELDNAME = object()
2
3
4 class Limit:
5 def __init__(self, offset=0, count=0):
6 self.offset = offset
7 self.count = count
8
9 def build_args(self):
10 if self.count:
11 return ["LIMIT", str(self.offset), str(self.count)]
12 else:
13 return []
14
15
16 class Reducer:
17 """
18 Base reducer object for all reducers.
19
20 See the `redisearch.reducers` module for the actual reducers.
21 """
22
23 NAME = None
24
25 def __init__(self, *args):
26 self._args = args
27 self._field = None
28 self._alias = None
29
30 def alias(self, alias):
31 """
32 Set the alias for this reducer.
33
34 ### Parameters
35
36 - **alias**: The value of the alias for this reducer. If this is the
37 special value `aggregation.FIELDNAME` then this reducer will be
38 aliased using the same name as the field upon which it operates.
39 Note that using `FIELDNAME` is only possible on reducers which
40 operate on a single field value.
41
42 This method returns the `Reducer` object making it suitable for
43 chaining.
44 """
45 if alias is FIELDNAME:
46 if not self._field:
47 raise ValueError("Cannot use FIELDNAME alias with no field")
48 # Chop off initial '@'
49 alias = self._field[1:]
50 self._alias = alias
51 return self
52
53 @property
54 def args(self):
55 return self._args
56
57
58 class SortDirection:
59 """
60 This special class is used to indicate sort direction.
61 """
62
63 DIRSTRING = None
64
65 def __init__(self, field):
66 self.field = field
67
68
69 class Asc(SortDirection):
70 """
71 Indicate that the given field should be sorted in ascending order
72 """
73
74 DIRSTRING = "ASC"
75
76
77 class Desc(SortDirection):
78 """
79 Indicate that the given field should be sorted in descending order
80 """
81
82 DIRSTRING = "DESC"
83
84
85 class AggregateRequest:
86 """
87 Aggregation request which can be passed to `Client.aggregate`.
88 """
89
90 def __init__(self, query="*"):
91 """
92 Create an aggregation request. This request may then be passed to
93 `client.aggregate()`.
94
95 In order for the request to be usable, it must contain at least one
96 group.
97
98 - **query** Query string for filtering records.
99
100 All member methods (except `build_args()`)
101 return the object itself, making them useful for chaining.
102 """
103 self._query = query
104 self._aggregateplan = []
105 self._loadfields = []
106 self._limit = Limit()
107 self._max = 0
108 self._with_schema = False
109 self._verbatim = False
110 self._cursor = []
111
112 def load(self, *fields):
113 """
114 Indicate the fields to be returned in the response. These fields are
115 returned in addition to any others implicitly specified.
116
117 ### Parameters
118
119 - **fields**: One or more fields in the format of `@field`
120 """
121 self._loadfields.extend(fields)
122 return self
123
124 def group_by(self, fields, *reducers):
125 """
126 Specify by which fields to group the aggregation.
127
128 ### Parameters
129
130 - **fields**: Fields to group by. This can either be a single string,
131 or a list of strings. both cases, the field should be specified as
132 `@field`.
133 - **reducers**: One or more reducers. Reducers may be found in the
134 `aggregation` module.
135 """
136 fields = [fields] if isinstance(fields, str) else fields
137 reducers = [reducers] if isinstance(reducers, Reducer) else reducers
138
139 ret = ["GROUPBY", str(len(fields)), *fields]
140 for reducer in reducers:
141 ret += ["REDUCE", reducer.NAME, str(len(reducer.args))]
142 ret.extend(reducer.args)
143 if reducer._alias is not None:
144 ret += ["AS", reducer._alias]
145
146 self._aggregateplan.extend(ret)
147 return self
148
149 def apply(self, **kwexpr):
150 """
151 Specify one or more projection expressions to add to each result
152
153 ### Parameters
154
155 - **kwexpr**: One or more key-value pairs for a projection. The key is
156 the alias for the projection, and the value is the projection
157 expression itself, for example `apply(square_root="sqrt(@foo)")`
158 """
159 for alias, expr in kwexpr.items():
160 ret = ["APPLY", expr]
161 if alias is not None:
162 ret += ["AS", alias]
163 self._aggregateplan.extend(ret)
164
165 return self
166
167 def limit(self, offset, num):
168 """
169 Sets the limit for the most recent group or query.
170
171 If no group has been defined yet (via `group_by()`) then this sets
172 the limit for the initial pool of results from the query. Otherwise,
173 this limits the number of items operated on from the previous group.
174
175 Setting a limit on the initial search results may be useful when
176 attempting to execute an aggregation on a sample of a large data set.
177
178 ### Parameters
179
180 - **offset**: Result offset from which to begin paging
181 - **num**: Number of results to return
182
183
184 Example of sorting the initial results:
185
186 ```
187 AggregateRequest("@sale_amount:[10000, inf]")\
188 .limit(0, 10)\
189 .group_by("@state", r.count())
190 ```
191
192 Will only group by the states found in the first 10 results of the
193 query `@sale_amount:[10000, inf]`. On the other hand,
194
195 ```
196 AggregateRequest("@sale_amount:[10000, inf]")\
197 .limit(0, 1000)\
198 .group_by("@state", r.count()\
199 .limit(0, 10)
200 ```
201
202 Will group all the results matching the query, but only return the
203 first 10 groups.
204
205 If you only wish to return a *top-N* style query, consider using
206 `sort_by()` instead.
207
208 """
209 self._limit = Limit(offset, num)
210 return self
211
212 def sort_by(self, *fields, **kwargs):
213 """
214 Indicate how the results should be sorted. This can also be used for
215 *top-N* style queries
216
217 ### Parameters
218
219 - **fields**: The fields by which to sort. This can be either a single
220 field or a list of fields. If you wish to specify order, you can
221 use the `Asc` or `Desc` wrapper classes.
222 - **max**: Maximum number of results to return. This can be
223 used instead of `LIMIT` and is also faster.
224
225
226 Example of sorting by `foo` ascending and `bar` descending:
227
228 ```
229 sort_by(Asc("@foo"), Desc("@bar"))
230 ```
231
232 Return the top 10 customers:
233
234 ```
235 AggregateRequest()\
236 .group_by("@customer", r.sum("@paid").alias(FIELDNAME))\
237 .sort_by(Desc("@paid"), max=10)
238 ```
239 """
240 if isinstance(fields, (str, SortDirection)):
241 fields = [fields]
242
243 fields_args = []
244 for f in fields:
245 if isinstance(f, SortDirection):
246 fields_args += [f.field, f.DIRSTRING]
247 else:
248 fields_args += [f]
249
250 ret = ["SORTBY", str(len(fields_args))]
251 ret.extend(fields_args)
252 max = kwargs.get("max", 0)
253 if max > 0:
254 ret += ["MAX", str(max)]
255
256 self._aggregateplan.extend(ret)
257 return self
258
259 def filter(self, expressions):
260 """
261 Specify filter for post-query results using predicates relating to
262 values in the result set.
263
264 ### Parameters
265
266 - **fields**: Fields to group by. This can either be a single string,
267 or a list of strings.
268 """
269 if isinstance(expressions, str):
270 expressions = [expressions]
271
272 for expression in expressions:
273 self._aggregateplan.extend(["FILTER", expression])
274
275 return self
276
277 def with_schema(self):
278 """
279 If set, the `schema` property will contain a list of `[field, type]`
280 entries in the result object.
281 """
282 self._with_schema = True
283 return self
284
285 def verbatim(self):
286 self._verbatim = True
287 return self
288
289 def cursor(self, count=0, max_idle=0.0):
290 args = ["WITHCURSOR"]
291 if count:
292 args += ["COUNT", str(count)]
293 if max_idle:
294 args += ["MAXIDLE", str(max_idle * 1000)]
295 self._cursor = args
296 return self
297
298 def build_args(self):
299 # @foo:bar ...
300 ret = [self._query]
301
302 if self._with_schema:
303 ret.append("WITHSCHEMA")
304
305 if self._verbatim:
306 ret.append("VERBATIM")
307
308 if self._cursor:
309 ret += self._cursor
310
311 if self._loadfields:
312 ret.append("LOAD")
313 ret.append(str(len(self._loadfields)))
314 ret.extend(self._loadfields)
315
316 ret.extend(self._aggregateplan)
317
318 ret += self._limit.build_args()
319
320 return ret
321
322
323 class Cursor:
324 def __init__(self, cid):
325 self.cid = cid
326 self.max_idle = 0
327 self.count = 0
328
329 def build_args(self):
330 args = [str(self.cid)]
331 if self.max_idle:
332 args += ["MAXIDLE", str(self.max_idle)]
333 if self.count:
334 args += ["COUNT", str(self.count)]
335 return args
336
337
338 class AggregateResult:
339 def __init__(self, rows, cursor, schema):
340 self.rows = rows
341 self.cursor = cursor
342 self.schema = schema
343
344 def __repr__(self):
345 cid = self.cursor.cid if self.cursor else -1
346 return (
347 f"<{self.__class__.__name__} at 0x{id(self):x} "
348 f"Rows={len(self.rows)}, Cursor={cid}>"
349 )
350
[end of redis/commands/search/aggregation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py
--- a/redis/commands/search/aggregation.py
+++ b/redis/commands/search/aggregation.py
@@ -103,6 +103,7 @@
self._query = query
self._aggregateplan = []
self._loadfields = []
+ self._loadall = False
self._limit = Limit()
self._max = 0
self._with_schema = False
@@ -116,9 +117,13 @@
### Parameters
- - **fields**: One or more fields in the format of `@field`
+ - **fields**: If fields not specified, all the fields will be loaded.
+ Otherwise, fields should be given in the format of `@field`.
"""
- self._loadfields.extend(fields)
+ if fields:
+ self._loadfields.extend(fields)
+ else:
+ self._loadall = True
return self
def group_by(self, fields, *reducers):
@@ -308,7 +313,10 @@
if self._cursor:
ret += self._cursor
- if self._loadfields:
+ if self._loadall:
+ ret.append("LOAD")
+ ret.append("*")
+ elif self._loadfields:
ret.append("LOAD")
ret.append(str(len(self._loadfields)))
ret.extend(self._loadfields)
|
{"golden_diff": "diff --git a/redis/commands/search/aggregation.py b/redis/commands/search/aggregation.py\n--- a/redis/commands/search/aggregation.py\n+++ b/redis/commands/search/aggregation.py\n@@ -103,6 +103,7 @@\n self._query = query\n self._aggregateplan = []\n self._loadfields = []\n+ self._loadall = False\n self._limit = Limit()\n self._max = 0\n self._with_schema = False\n@@ -116,9 +117,13 @@\n \n ### Parameters\n \n- - **fields**: One or more fields in the format of `@field`\n+ - **fields**: If fields not specified, all the fields will be loaded.\n+ Otherwise, fields should be given in the format of `@field`.\n \"\"\"\n- self._loadfields.extend(fields)\n+ if fields:\n+ self._loadfields.extend(fields)\n+ else:\n+ self._loadall = True\n return self\n \n def group_by(self, fields, *reducers):\n@@ -308,7 +313,10 @@\n if self._cursor:\n ret += self._cursor\n \n- if self._loadfields:\n+ if self._loadall:\n+ ret.append(\"LOAD\")\n+ ret.append(\"*\")\n+ elif self._loadfields:\n ret.append(\"LOAD\")\n ret.append(str(len(self._loadfields)))\n ret.extend(self._loadfields)\n", "issue": "Add support for FT.AGGREGATE with LOAD *\nredis-py should support recent changes to RediSearch. For reference, see the following: RediSearch/RediSearch#2301\n", "before_files": [{"content": "FIELDNAME = object()\n\n\nclass Limit:\n def __init__(self, offset=0, count=0):\n self.offset = offset\n self.count = count\n\n def build_args(self):\n if self.count:\n return [\"LIMIT\", str(self.offset), str(self.count)]\n else:\n return []\n\n\nclass Reducer:\n \"\"\"\n Base reducer object for all reducers.\n\n See the `redisearch.reducers` module for the actual reducers.\n \"\"\"\n\n NAME = None\n\n def __init__(self, *args):\n self._args = args\n self._field = None\n self._alias = None\n\n def alias(self, alias):\n \"\"\"\n Set the alias for this reducer.\n\n ### Parameters\n\n - **alias**: The value of the alias for this reducer. If this is the\n special value `aggregation.FIELDNAME` then this reducer will be\n aliased using the same name as the field upon which it operates.\n Note that using `FIELDNAME` is only possible on reducers which\n operate on a single field value.\n\n This method returns the `Reducer` object making it suitable for\n chaining.\n \"\"\"\n if alias is FIELDNAME:\n if not self._field:\n raise ValueError(\"Cannot use FIELDNAME alias with no field\")\n # Chop off initial '@'\n alias = self._field[1:]\n self._alias = alias\n return self\n\n @property\n def args(self):\n return self._args\n\n\nclass SortDirection:\n \"\"\"\n This special class is used to indicate sort direction.\n \"\"\"\n\n DIRSTRING = None\n\n def __init__(self, field):\n self.field = field\n\n\nclass Asc(SortDirection):\n \"\"\"\n Indicate that the given field should be sorted in ascending order\n \"\"\"\n\n DIRSTRING = \"ASC\"\n\n\nclass Desc(SortDirection):\n \"\"\"\n Indicate that the given field should be sorted in descending order\n \"\"\"\n\n DIRSTRING = \"DESC\"\n\n\nclass AggregateRequest:\n \"\"\"\n Aggregation request which can be passed to `Client.aggregate`.\n \"\"\"\n\n def __init__(self, query=\"*\"):\n \"\"\"\n Create an aggregation request. 
This request may then be passed to\n `client.aggregate()`.\n\n In order for the request to be usable, it must contain at least one\n group.\n\n - **query** Query string for filtering records.\n\n All member methods (except `build_args()`)\n return the object itself, making them useful for chaining.\n \"\"\"\n self._query = query\n self._aggregateplan = []\n self._loadfields = []\n self._limit = Limit()\n self._max = 0\n self._with_schema = False\n self._verbatim = False\n self._cursor = []\n\n def load(self, *fields):\n \"\"\"\n Indicate the fields to be returned in the response. These fields are\n returned in addition to any others implicitly specified.\n\n ### Parameters\n\n - **fields**: One or more fields in the format of `@field`\n \"\"\"\n self._loadfields.extend(fields)\n return self\n\n def group_by(self, fields, *reducers):\n \"\"\"\n Specify by which fields to group the aggregation.\n\n ### Parameters\n\n - **fields**: Fields to group by. This can either be a single string,\n or a list of strings. both cases, the field should be specified as\n `@field`.\n - **reducers**: One or more reducers. Reducers may be found in the\n `aggregation` module.\n \"\"\"\n fields = [fields] if isinstance(fields, str) else fields\n reducers = [reducers] if isinstance(reducers, Reducer) else reducers\n\n ret = [\"GROUPBY\", str(len(fields)), *fields]\n for reducer in reducers:\n ret += [\"REDUCE\", reducer.NAME, str(len(reducer.args))]\n ret.extend(reducer.args)\n if reducer._alias is not None:\n ret += [\"AS\", reducer._alias]\n\n self._aggregateplan.extend(ret)\n return self\n\n def apply(self, **kwexpr):\n \"\"\"\n Specify one or more projection expressions to add to each result\n\n ### Parameters\n\n - **kwexpr**: One or more key-value pairs for a projection. The key is\n the alias for the projection, and the value is the projection\n expression itself, for example `apply(square_root=\"sqrt(@foo)\")`\n \"\"\"\n for alias, expr in kwexpr.items():\n ret = [\"APPLY\", expr]\n if alias is not None:\n ret += [\"AS\", alias]\n self._aggregateplan.extend(ret)\n\n return self\n\n def limit(self, offset, num):\n \"\"\"\n Sets the limit for the most recent group or query.\n\n If no group has been defined yet (via `group_by()`) then this sets\n the limit for the initial pool of results from the query. Otherwise,\n this limits the number of items operated on from the previous group.\n\n Setting a limit on the initial search results may be useful when\n attempting to execute an aggregation on a sample of a large data set.\n\n ### Parameters\n\n - **offset**: Result offset from which to begin paging\n - **num**: Number of results to return\n\n\n Example of sorting the initial results:\n\n ```\n AggregateRequest(\"@sale_amount:[10000, inf]\")\\\n .limit(0, 10)\\\n .group_by(\"@state\", r.count())\n ```\n\n Will only group by the states found in the first 10 results of the\n query `@sale_amount:[10000, inf]`. On the other hand,\n\n ```\n AggregateRequest(\"@sale_amount:[10000, inf]\")\\\n .limit(0, 1000)\\\n .group_by(\"@state\", r.count()\\\n .limit(0, 10)\n ```\n\n Will group all the results matching the query, but only return the\n first 10 groups.\n\n If you only wish to return a *top-N* style query, consider using\n `sort_by()` instead.\n\n \"\"\"\n self._limit = Limit(offset, num)\n return self\n\n def sort_by(self, *fields, **kwargs):\n \"\"\"\n Indicate how the results should be sorted. This can also be used for\n *top-N* style queries\n\n ### Parameters\n\n - **fields**: The fields by which to sort. 
This can be either a single\n field or a list of fields. If you wish to specify order, you can\n use the `Asc` or `Desc` wrapper classes.\n - **max**: Maximum number of results to return. This can be\n used instead of `LIMIT` and is also faster.\n\n\n Example of sorting by `foo` ascending and `bar` descending:\n\n ```\n sort_by(Asc(\"@foo\"), Desc(\"@bar\"))\n ```\n\n Return the top 10 customers:\n\n ```\n AggregateRequest()\\\n .group_by(\"@customer\", r.sum(\"@paid\").alias(FIELDNAME))\\\n .sort_by(Desc(\"@paid\"), max=10)\n ```\n \"\"\"\n if isinstance(fields, (str, SortDirection)):\n fields = [fields]\n\n fields_args = []\n for f in fields:\n if isinstance(f, SortDirection):\n fields_args += [f.field, f.DIRSTRING]\n else:\n fields_args += [f]\n\n ret = [\"SORTBY\", str(len(fields_args))]\n ret.extend(fields_args)\n max = kwargs.get(\"max\", 0)\n if max > 0:\n ret += [\"MAX\", str(max)]\n\n self._aggregateplan.extend(ret)\n return self\n\n def filter(self, expressions):\n \"\"\"\n Specify filter for post-query results using predicates relating to\n values in the result set.\n\n ### Parameters\n\n - **fields**: Fields to group by. This can either be a single string,\n or a list of strings.\n \"\"\"\n if isinstance(expressions, str):\n expressions = [expressions]\n\n for expression in expressions:\n self._aggregateplan.extend([\"FILTER\", expression])\n\n return self\n\n def with_schema(self):\n \"\"\"\n If set, the `schema` property will contain a list of `[field, type]`\n entries in the result object.\n \"\"\"\n self._with_schema = True\n return self\n\n def verbatim(self):\n self._verbatim = True\n return self\n\n def cursor(self, count=0, max_idle=0.0):\n args = [\"WITHCURSOR\"]\n if count:\n args += [\"COUNT\", str(count)]\n if max_idle:\n args += [\"MAXIDLE\", str(max_idle * 1000)]\n self._cursor = args\n return self\n\n def build_args(self):\n # @foo:bar ...\n ret = [self._query]\n\n if self._with_schema:\n ret.append(\"WITHSCHEMA\")\n\n if self._verbatim:\n ret.append(\"VERBATIM\")\n\n if self._cursor:\n ret += self._cursor\n\n if self._loadfields:\n ret.append(\"LOAD\")\n ret.append(str(len(self._loadfields)))\n ret.extend(self._loadfields)\n\n ret.extend(self._aggregateplan)\n\n ret += self._limit.build_args()\n\n return ret\n\n\nclass Cursor:\n def __init__(self, cid):\n self.cid = cid\n self.max_idle = 0\n self.count = 0\n\n def build_args(self):\n args = [str(self.cid)]\n if self.max_idle:\n args += [\"MAXIDLE\", str(self.max_idle)]\n if self.count:\n args += [\"COUNT\", str(self.count)]\n return args\n\n\nclass AggregateResult:\n def __init__(self, rows, cursor, schema):\n self.rows = rows\n self.cursor = cursor\n self.schema = schema\n\n def __repr__(self):\n cid = self.cursor.cid if self.cursor else -1\n return (\n f\"<{self.__class__.__name__} at 0x{id(self):x} \"\n f\"Rows={len(self.rows)}, Cursor={cid}>\"\n )\n", "path": "redis/commands/search/aggregation.py"}]}
| 3,770 | 325 |
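For reference, a minimal sketch of what the patched `AggregateRequest.load()` emits, assuming the patched `redis/commands/search/aggregation.py` shown above (field names are illustrative):

```python
from redis.commands.search.aggregation import AggregateRequest

# Explicit fields still produce "LOAD <count> <fields...>".
explicit = AggregateRequest("*").load("@title", "@price")
print(explicit.build_args())    # ['*', 'LOAD', '2', '@title', '@price']

# With no arguments, the patched load() sets _loadall and produces "LOAD *".
everything = AggregateRequest("*").load()
print(everything.build_args())  # ['*', 'LOAD', '*']
```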
gh_patches_debug_5297
|
rasdani/github-patches
|
git_diff
|
cornellius-gp__gpytorch-1012
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] IndexError when GridInterpolationKernel wraps a PeriodicKernel
# 🐛 Bug
I tried to adapt the [KISS-GP for 1D data tutorial](https://gpytorch.readthedocs.io/en/latest/examples/02_Scalable_Exact_GPs/KISSGP_Regression.html#KISS-GP-for-1D-Data) to use a `PeriodicKernel` instead of an `RBFKernel`.
However, trying to evaluate the MLL fails with an `IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)`, thrown from [grid_kernel.py: 133](https://github.com/cornellius-gp/gpytorch/blob/0317b121ebaaa921a7851a6af4f2219ff18eeaf0/gpytorch/kernels/grid_kernel.py#L133).
It seems to me this can only be a bug, as an RBF kernel and a Periodic kernel are really not very different semantically?
## To reproduce
** Code snippet to reproduce **
```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 7 13:52:48 2020
@author: val
"""
import math
import torch
import gpytorch
#%matplotlib inline
train_x = torch.linspace(0, 1, 1000)
train_y = torch.sin(train_x * (4 * math.pi) + torch.randn(train_x.size()) * 0.2)
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that. Here we are using a grid that has the same number of points as the training data (a ratio of 1.0). Performance can be sensitive to this parameter, so you may want to adjust it for your own problem on a validation set.
grid_size = gpytorch.utils.grid.choose_grid_size(train_x,1.0)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
## NOTE the only difference from the 1D KISS-GP tutorial is that the RBFKernel got replaced with a PeriodicKernel()
gpytorch.kernels.PeriodicKernel(), #gpytorch.kernels.RBFKernel(),
grid_size=grid_size, num_dims=1
)
)
#self.covar_module = gpytorch.kernels.PeriodicKernel()
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
training_iterations = 1
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam([
{'params': model.parameters()}, # Includes GaussianLikelihood parameters
], lr=0.1)
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(training_iterations):
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y) ## NOTE fails here.
loss.backward()
optimizer.step()
```
** Stack trace/error message **
```
Traceback (most recent call last):
File "<ipython-input-13-effc04c4ab77>", line 61, in <module>
loss = -mll(output, train_y) ## NOTE fails here.
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py", line 24, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py", line 51, in forward
res = output.log_prob(target)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py", line 135, in log_prob
inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1038, in inv_quad_logdet
args = self.representation()
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py", line 1268, in representation
representation += list(arg.representation())
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 311, in representation
return self.evaluate_kernel().representation()
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/utils/memoize.py", line 34, in g
add_to_cache(self, cache_name, method(self, *args, **kwargs))
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 278, in evaluate_kernel
res = self.kernel(x1, x2, diag=False, last_dim_is_batch=self.last_dim_is_batch, **self.params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/kernel.py", line 395, in __call__
res = super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py", line 24, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/scale_kernel.py", line 90, in forward
orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 177, in forward
base_lazy_tsr = lazify(self._inducing_forward(last_dim_is_batch=last_dim_is_batch, **params))
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 143, in _inducing_forward
return super().forward(self.grid, self.grid, last_dim_is_batch=last_dim_is_batch, **params)
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py", line 133, in forward
covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]
File "/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py", line 133, in <listcomp>
covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)
```
## Expected Behavior
I expected the training loop to terminate successfully, without throwing errors.
## System information
**Please complete the following information:**
- GPyTorch Version: 1.0.0
- PyTorch Version: 1.3.1
- Computer OS macOS High Sierra 10.13.3
</issue>
<code>
[start of gpytorch/kernels/periodic_kernel.py]
1 #!/usr/bin/env python3
2
3 import math
4
5 import torch
6
7 from ..constraints import Positive
8 from .kernel import Kernel
9
10
11 class PeriodicKernel(Kernel):
12 r""" Computes a covariance matrix based on the periodic kernel
13 between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
14
15 .. math::
16
17 \begin{equation*}
18 k_{\text{Periodic}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left(
19 \frac{2 \sin^2 \left( \pi \Vert \mathbf{x_1} - \mathbf{x_2} \Vert_1 / p \right) }
20 { \ell^2 } \right)
21 \end{equation*}
22
23 where
24
25 * :math:`p` is the periord length parameter.
26 * :math:`\ell` is a lengthscale parameter.
27
28 .. note::
29
30 This kernel does not have an `outputscale` parameter. To add a scaling parameter,
31 decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
32
33 .. note::
34
35 This kernel does not have an ARD lengthscale option.
36
37 Args:
38 :attr:`batch_shape` (torch.Size, optional):
39 Set this if you want a separate lengthscale for each
40 batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
41 :attr:`active_dims` (tuple of ints, optional):
42 Set this if you want to compute the covariance of only a few input dimensions. The ints
43 corresponds to the indices of the dimensions. Default: `None`.
44 :attr:`period_length_prior` (Prior, optional):
45 Set this if you want to apply a prior to the period length parameter. Default: `None`.
46 :attr:`lengthscale_prior` (Prior, optional):
47 Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
48 :attr:`lengthscale_constraint` (Constraint, optional):
49 Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.
50 :attr:`period_length_constraint` (Constraint, optional):
51 Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.
52 :attr:`eps` (float):
53 The minimum value that the lengthscale/period length can take
54 (prevents divide by zero errors). Default: `1e-6`.
55
56 Attributes:
57 :attr:`lengthscale` (Tensor):
58 The lengthscale parameter. Size = `*batch_shape x 1 x 1`.
59 :attr:`period_length` (Tensor):
60 The period length parameter. Size = `*batch_shape x 1 x 1`.
61
62 Example:
63 >>> x = torch.randn(10, 5)
64 >>> # Non-batch: Simple option
65 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
66 >>>
67 >>> batch_x = torch.randn(2, 10, 5)
68 >>> # Batch: Simple option
69 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
70 >>> # Batch: different lengthscale for each batch
71 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))
72 >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)
73 """
74
75 has_lengthscale = True
76
77 def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):
78 super(PeriodicKernel, self).__init__(**kwargs)
79 if period_length_constraint is None:
80 period_length_constraint = Positive()
81
82 self.register_parameter(
83 name="raw_period_length", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))
84 )
85
86 if period_length_prior is not None:
87 self.register_prior(
88 "period_length_prior",
89 period_length_prior,
90 lambda: self.period_length,
91 lambda v: self._set_period_length(v),
92 )
93
94 self.register_constraint("raw_period_length", period_length_constraint)
95
96 @property
97 def period_length(self):
98 return self.raw_period_length_constraint.transform(self.raw_period_length)
99
100 @period_length.setter
101 def period_length(self, value):
102 self._set_period_length(value)
103
104 def _set_period_length(self, value):
105 if not torch.is_tensor(value):
106 value = torch.as_tensor(value).to(self.raw_period_length)
107 self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))
108
109 def forward(self, x1, x2, diag=False, **params):
110 x1_ = x1.div(self.period_length)
111 x2_ = x2.div(self.period_length)
112 diff = self.covar_dist(x1_, x2_, diag=diag, **params)
113 res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
114 if diff.ndimension() == 2 or diag:
115 res = res.squeeze(0)
116 return res
117
[end of gpytorch/kernels/periodic_kernel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py
--- a/gpytorch/kernels/periodic_kernel.py
+++ b/gpytorch/kernels/periodic_kernel.py
@@ -111,6 +111,6 @@
x2_ = x2.div(self.period_length)
diff = self.covar_dist(x1_, x2_, diag=diag, **params)
res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
- if diff.ndimension() == 2 or diag:
+ if diag:
res = res.squeeze(0)
return res
|
{"golden_diff": "diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py\n--- a/gpytorch/kernels/periodic_kernel.py\n+++ b/gpytorch/kernels/periodic_kernel.py\n@@ -111,6 +111,6 @@\n x2_ = x2.div(self.period_length)\n diff = self.covar_dist(x1_, x2_, diag=diag, **params)\n res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()\n- if diff.ndimension() == 2 or diag:\n+ if diag:\n res = res.squeeze(0)\n return res\n", "issue": "[Bug] IndexError when GridInterpolationKernel wraps a PeriodicKernel\n# \ud83d\udc1b Bug\r\n\r\nI tried to adapt the [KISS-GP for 1D data tutorial](https://gpytorch.readthedocs.io/en/latest/examples/02_Scalable_Exact_GPs/KISSGP_Regression.html#KISS-GP-for-1D-Data) to use a `PeriodicKernel` instead of an `RBFKernel`.\r\n\r\nHowever, trying to evaluate the MLL fails with an `IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)`, thrown from [grid_kernel.py: 133](https://github.com/cornellius-gp/gpytorch/blob/0317b121ebaaa921a7851a6af4f2219ff18eeaf0/gpytorch/kernels/grid_kernel.py#L133).\r\n\r\nIt seems to me this can only be a bug, as an RBF kernel and a Periodic kernel are really not very different semantically?\r\n\r\n## To reproduce\r\n\r\n** Code snippet to reproduce **\r\n\r\n```python\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 7 13:52:48 2020\r\n\r\n@author: val\r\n\"\"\"\r\n\r\n\r\nimport math\r\nimport torch\r\nimport gpytorch\r\n\r\n#%matplotlib inline\r\n\r\ntrain_x = torch.linspace(0, 1, 1000)\r\ntrain_y = torch.sin(train_x * (4 * math.pi) + torch.randn(train_x.size()) * 0.2)\r\n\r\nclass GPRegressionModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood):\r\n super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)\r\n\r\n # SKI requires a grid size hyperparameter. This util can help with that. Here we are using a grid that has the same number of points as the training data (a ratio of 1.0). 
Performance can be sensitive to this parameter, so you may want to adjust it for your own problem on a validation set.\r\n grid_size = gpytorch.utils.grid.choose_grid_size(train_x,1.0)\r\n\r\n self.mean_module = gpytorch.means.ConstantMean()\r\n self.covar_module = gpytorch.kernels.ScaleKernel(\r\n gpytorch.kernels.GridInterpolationKernel( \r\n ## NOTE the only difference from the 1D KISS-GP tutorial is that the RBFKernel got replaced with a PeriodicKernel()\r\n gpytorch.kernels.PeriodicKernel(), #gpytorch.kernels.RBFKernel(),\r\n grid_size=grid_size, num_dims=1\r\n )\r\n )\r\n #self.covar_module = gpytorch.kernels.PeriodicKernel() \r\n\r\n def forward(self, x):\r\n mean_x = self.mean_module(x)\r\n covar_x = self.covar_module(x)\r\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\r\n\r\n\r\nlikelihood = gpytorch.likelihoods.GaussianLikelihood()\r\nmodel = GPRegressionModel(train_x, train_y, likelihood)\r\n\r\ntraining_iterations = 1\r\n\r\n# Find optimal model hyperparameters\r\nmodel.train()\r\nlikelihood.train()\r\n\r\n# Use the adam optimizer\r\noptimizer = torch.optim.Adam([\r\n {'params': model.parameters()}, # Includes GaussianLikelihood parameters\r\n], lr=0.1)\r\n\r\n# \"Loss\" for GPs - the marginal log likelihood\r\nmll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\r\n\r\nfor i in range(training_iterations):\r\n optimizer.zero_grad()\r\n output = model(train_x)\r\n loss = -mll(output, train_y) ## NOTE fails here.\r\n loss.backward()\r\n optimizer.step()\r\n```\r\n\r\n** Stack trace/error message **\r\n\r\n```\r\nTraceback (most recent call last):\r\n\r\n File \"<ipython-input-13-effc04c4ab77>\", line 61, in <module>\r\n loss = -mll(output, train_y) ## NOTE fails here.\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py\", line 24, in __call__\r\n outputs = self.forward(*inputs, **kwargs)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py\", line 51, in forward\r\n res = output.log_prob(target)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py\", line 135, in log_prob\r\n inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py\", line 1038, in inv_quad_logdet\r\n args = self.representation()\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py\", line 1268, in representation\r\n representation += list(arg.representation())\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py\", line 311, in representation\r\n return self.evaluate_kernel().representation()\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/utils/memoize.py\", line 34, in g\r\n add_to_cache(self, cache_name, method(self, *args, **kwargs))\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py\", line 278, in evaluate_kernel\r\n res = self.kernel(x1, x2, diag=False, last_dim_is_batch=self.last_dim_is_batch, **self.params)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/kernel.py\", line 395, in __call__\r\n res = super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params)\r\n\r\n File 
\"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/module.py\", line 24, in __call__\r\n outputs = self.forward(*inputs, **kwargs)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/scale_kernel.py\", line 90, in forward\r\n orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py\", line 177, in forward\r\n base_lazy_tsr = lazify(self._inducing_forward(last_dim_is_batch=last_dim_is_batch, **params))\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_interpolation_kernel.py\", line 143, in _inducing_forward\r\n return super().forward(self.grid, self.grid, last_dim_is_batch=last_dim_is_batch, **params)\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py\", line 133, in forward\r\n covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]\r\n\r\n File \"/Users/val/opt/anaconda3/lib/python3.7/site-packages/gpytorch/kernels/grid_kernel.py\", line 133, in <listcomp>\r\n covars = [ToeplitzLazyTensor(c.squeeze(-2)) for c in covars]\r\n\r\nIndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)\r\n```\r\n\r\n## Expected Behavior\r\n\r\nI expected the training loop to terminate successfully, without throwing errors.\r\n\r\n\r\n## System information\r\n\r\n**Please complete the following information:**\r\n- GPyTorch Version: 1.0.0\r\n- PyTorch Version: 1.3.1\r\n- Computer OS macOS High Sierra 10.13.3\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport math\n\nimport torch\n\nfrom ..constraints import Positive\nfrom .kernel import Kernel\n\n\nclass PeriodicKernel(Kernel):\n r\"\"\" Computes a covariance matrix based on the periodic kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n\n \\begin{equation*}\n k_{\\text{Periodic}}(\\mathbf{x_1}, \\mathbf{x_2}) = \\exp \\left(\n \\frac{2 \\sin^2 \\left( \\pi \\Vert \\mathbf{x_1} - \\mathbf{x_2} \\Vert_1 / p \\right) }\n { \\ell^2 } \\right)\n \\end{equation*}\n\n where\n\n * :math:`p` is the periord length parameter.\n * :math:`\\ell` is a lengthscale parameter.\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n .. note::\n\n This kernel does not have an ARD lengthscale option.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`period_length_prior` (Prior, optional):\n Set this if you want to apply a prior to the period length parameter. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`lengthscale_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.\n :attr:`period_length_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the period length. 
Default: `Positive`.\n :attr:`eps` (float):\n The minimum value that the lengthscale/period length can take\n (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. Size = `*batch_shape x 1 x 1`.\n :attr:`period_length` (Tensor):\n The period length parameter. Size = `*batch_shape x 1 x 1`.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))\n >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)\n \"\"\"\n\n has_lengthscale = True\n\n def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):\n super(PeriodicKernel, self).__init__(**kwargs)\n if period_length_constraint is None:\n period_length_constraint = Positive()\n\n self.register_parameter(\n name=\"raw_period_length\", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))\n )\n\n if period_length_prior is not None:\n self.register_prior(\n \"period_length_prior\",\n period_length_prior,\n lambda: self.period_length,\n lambda v: self._set_period_length(v),\n )\n\n self.register_constraint(\"raw_period_length\", period_length_constraint)\n\n @property\n def period_length(self):\n return self.raw_period_length_constraint.transform(self.raw_period_length)\n\n @period_length.setter\n def period_length(self, value):\n self._set_period_length(value)\n\n def _set_period_length(self, value):\n if not torch.is_tensor(value):\n value = torch.as_tensor(value).to(self.raw_period_length)\n self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))\n\n def forward(self, x1, x2, diag=False, **params):\n x1_ = x1.div(self.period_length)\n x2_ = x2.div(self.period_length)\n diff = self.covar_dist(x1_, x2_, diag=diag, **params)\n res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()\n if diff.ndimension() == 2 or diag:\n res = res.squeeze(0)\n return res\n", "path": "gpytorch/kernels/periodic_kernel.py"}]}
| 3,771 | 153 |
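The one-line change above stops `PeriodicKernel.forward` from squeezing away the row dimension for plain 2-D inputs, which is exactly what `GridKernel`'s Toeplitz path relies on. A minimal sketch of the shape difference, assuming the patched kernel and the GPyTorch/PyTorch versions reported in the issue:

```python
import torch
import gpytorch

k = gpytorch.kernels.PeriodicKernel()
# One grid point against a small 1-D grid, mirroring GridKernel's Toeplitz path.
row = k.forward(torch.zeros(1, 1), torch.linspace(0, 1, 5).unsqueeze(-1))

# Patched: the 1 x 5 row is preserved, so GridKernel's
# ToeplitzLazyTensor(row.squeeze(-2)) receives the 1-D Toeplitz column it expects.
print(row.shape)              # torch.Size([1, 5])
print(row.squeeze(-2).shape)  # torch.Size([5])

# Unpatched: forward() squeezed the row to shape (5,), and squeeze(-2) then raised
# "IndexError: Dimension out of range (expected to be in range of [-1, 0], but got -2)".
```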
gh_patches_debug_21
|
rasdani/github-patches
|
git_diff
|
sbi-dev__sbi-31
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move tests to top folder
</issue>
<code>
[start of sbi/dummytests/whateverstuff.py]
1 # content of test_compute.py
2
3
4 def test_compute(param1):
5 assert param1 < 4
[end of sbi/dummytests/whateverstuff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sbi/dummytests/whateverstuff.py b/sbi/dummytests/whateverstuff.py
deleted file mode 100644
--- a/sbi/dummytests/whateverstuff.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# content of test_compute.py
-
-
-def test_compute(param1):
- assert param1 < 4
\ No newline at end of file
|
{"golden_diff": "diff --git a/sbi/dummytests/whateverstuff.py b/sbi/dummytests/whateverstuff.py\ndeleted file mode 100644\n--- a/sbi/dummytests/whateverstuff.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-# content of test_compute.py\n-\n-\n-def test_compute(param1):\n- assert param1 < 4\n\\ No newline at end of file\n", "issue": "Move tests to top folder\n\n", "before_files": [{"content": "# content of test_compute.py\n\n\ndef test_compute(param1):\n assert param1 < 4", "path": "sbi/dummytests/whateverstuff.py"}]}
| 576 | 95 |
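The golden patch only deletes the dummy module. If the test were instead kept under a top-level `tests/` folder, it would also need a concrete value for `param1` (pytest otherwise reports a missing fixture). A hypothetical relocated version:

```python
# tests/test_compute.py (hypothetical location and parametrization)
import pytest

@pytest.mark.parametrize("param1", [1, 2, 3])
def test_compute(param1):
    assert param1 < 4
```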
gh_patches_debug_42
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-221
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
make setup.py an executable script
Change file permissions and reformat the shebang line
</issue>
<code>
[start of setup.py]
1 # !/usr/bin/env python
2
3 import os
4 from setuptools import setup, find_packages
5
6
7 def package_files(directory):
8 paths = []
9 for (path, directories, filenames) in os.walk(directory):
10 for filename in filenames:
11 paths.append(os.path.join('..', path, filename))
12 return paths
13
14
15 here = os.path.abspath(os.path.dirname(__file__))
16
17 extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))
18 extra_files.append(os.path.join(here, 'CONTRIBUTORS'))
19 extra_files.append(os.path.join(here, 'LICENSE'))
20 extra_files.append(os.path.join(here, 'README.rst'))
21 extra_files.append(os.path.join(here, 'requirements.txt'))
22 extra_files.append(os.path.join(here, 'VERSION'))
23 extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))
24
25 with open(os.path.join(here, 'requirements.txt')) as f:
26 required = f.read().splitlines()
27
28 with open(os.path.join(here, 'VERSION')) as version_file:
29 version = version_file.read().strip()
30
31 setup(
32 name='WrightTools',
33 packages=find_packages(),
34 package_data={'': extra_files},
35 setup_requires=['pytest-runner'],
36 tests_require=['pytest'],
37 install_requires=required,
38 extras_require={'docs': ['sphinx-gallery>=0.1.9']},
39 version=version,
40 description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',
41 author='Blaise Thompson',
42 author_email='[email protected]',
43 license='MIT',
44 url='http://wright.tools',
45 keywords='spectroscopy science multidimensional visualization',
46 classifiers=['Development Status :: 5 - Production/Stable',
47 'Intended Audience :: Science/Research',
48 'License :: OSI Approved :: MIT License',
49 'Natural Language :: English',
50 'Programming Language :: Python :: 2',
51 'Programming Language :: Python :: 2.7',
52 'Programming Language :: Python :: 3',
53 'Programming Language :: Python :: 3.3',
54 'Programming Language :: Python :: 3.4',
55 'Programming Language :: Python :: 3.5',
56 'Topic :: Scientific/Engineering']
57 )
58
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-# !/usr/bin/env python
+#! /usr/bin/env python
import os
from setuptools import setup, find_packages
|
{"golden_diff": "diff --git a/setup.py b/setup.py\nold mode 100644\nnew mode 100755\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,4 @@\n-# !/usr/bin/env python\n+#! /usr/bin/env python\n \n import os\n from setuptools import setup, find_packages\n", "issue": "make setup.py an executable script\nChange file permissions and reformat the shebang line\n", "before_files": [{"content": "# !/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}]}
| 1,131 | 78 |
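The patch makes two changes: the file mode flips from 100644 to 100755 and the shebang becomes `#! /usr/bin/env python`. A small check sketch for both, assuming it is run from the repository root after applying the patch:

```python
import os
import stat

st = os.stat("setup.py")
assert st.st_mode & stat.S_IXUSR, "owner execute bit should be set (mode 100755)"

with open("setup.py") as fh:
    assert fh.readline().rstrip() == "#! /usr/bin/env python"
```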
gh_patches_debug_8151
|
rasdani/github-patches
|
git_diff
|
google__turbinia-826
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jupyter FileExtractionTask task fails
```
2021-02-25 17:17:55 [INFO] Execution of [['sudo', 'image_export.py', '--logfile', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/4a7632dfe76c428797ce0ddeafc684bf.log', '-w', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/export', '--partitions', 'all', '--artifact_filters', 'JupyterConfigFile', '-d', '/dev/loop1']] failed with status 1
2021-02-25 17:17:55 [INFO] image_export.py failed for artifact JupyterConfigFile.
```
https://github.com/google/turbinia/pull/766/checks?check_run_id=1981113701#step:9:842
</issue>
<code>
[start of turbinia/workers/analysis/jupyter.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2020 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task for analysing Jupyter."""
16
17 from __future__ import unicode_literals
18
19 import os
20 import re
21
22 from turbinia import TurbiniaException
23 from turbinia.evidence import EvidenceState as state
24 from turbinia.evidence import ReportText
25 from turbinia.lib import text_formatter as fmt
26 from turbinia.workers import TurbiniaTask
27 from turbinia.workers import Priority
28
29
30 class JupyterAnalysisTask(TurbiniaTask):
31 """Task to analyze a Jupyter Notebook config."""
32
33 REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]
34
35 def run(self, evidence, result):
36 """Run the Jupyter worker.
37
38 Args:
39 evidence (Evidence object): The evidence to process
40 result (TurbiniaTaskResult): The object to place task results into.
41
42 Returns:
43 TurbiniaTaskResult object.
44 """
45
46 # Where to store the resulting output file.
47 output_file_name = 'jupyter_analysis.txt'
48 output_file_path = os.path.join(self.output_dir, output_file_name)
49
50 # What type of evidence we should output.
51 output_evidence = ReportText(source_path=output_file_path)
52
53 # Read the config file.
54
55 jupyter_config = open(evidence.local_path, 'r').read()
56
57 # Extract the config and return the report
58 (report, priority, summary) = self.analyse_config(jupyter_config)
59 output_evidence.text_data = report
60 result.report_priority = priority
61 result.report_data = report
62
63 # Write the report to the output file.
64 with open(output_file_path, 'w') as fh:
65 fh.write(output_evidence.text_data.encode('utf8'))
66 fh.write('\n'.encode('utf8'))
67
68 # Add the resulting evidence to the result object.
69 result.add_evidence(output_evidence, evidence.config)
70 result.close(self, success=True, status=summary)
71
72 return result
73
74 def analyse_config(self, jupyter_config):
75 """Extract security related configs from Jupyter configuration files.
76
77 Args:
78 config (str): configuration file content.
79
80 Returns:
81 Tuple(
82 report_text(str): The report data
83 report_priority(int): The priority of the report (0 - 100)
84 summary(str): A summary of the report (used for task status)
85 )
86 """
87 findings = []
88 num_misconfigs = 0
89 for line in jupyter_config.split('\n'):
90
91 if all(x in line for x in ['disable_check_xsrf', 'True']):
92 findings.append(fmt.bullet('XSRF protection is disabled.'))
93 num_misconfigs += 1
94 continue
95 if all(x in line for x in ['allow_root', 'True']):
96 findings.append(fmt.bullet('Juypter Notebook allowed to run as root.'))
97 num_misconfigs += 1
98 continue
99 if 'NotebookApp.password' in line:
100 if all(x in line for x in ['required', 'False']):
101 findings.append(
102 fmt.bullet(
103 'Password is not required to access this Jupyter Notebook.'))
104 num_misconfigs += 1
105 continue
106 if 'required' not in line:
107 password_hash = line.split('=')
108 if len(password_hash) > 1:
109 if password_hash[1].strip() == "''":
110 findings.append(
111 fmt.bullet(
112 'There is no password set for this Jupyter Notebook.'))
113 num_misconfigs += 1
114 if all(x in line for x in ['allow_remote_access', 'True']):
115 findings.append(
116 fmt.bullet('Remote access is enabled on this Jupyter Notebook.'))
117 num_misconfigs += 1
118 continue
119
120 if findings:
121 summary = 'Insecure Jupyter Notebook configuration found. Total misconfigs: {}'.format(
122 num_misconfigs)
123 findings.insert(0, fmt.heading4(fmt.bold(summary)))
124 report = '\n'.join(findings)
125 return (report, Priority.HIGH, summary)
126
127 report = 'No issues found in Jupyter Notebook configuration.'
128 return (report, Priority.LOW, report)
129
[end of turbinia/workers/analysis/jupyter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/turbinia/workers/analysis/jupyter.py b/turbinia/workers/analysis/jupyter.py
--- a/turbinia/workers/analysis/jupyter.py
+++ b/turbinia/workers/analysis/jupyter.py
@@ -61,9 +61,9 @@
result.report_data = report
# Write the report to the output file.
- with open(output_file_path, 'w') as fh:
- fh.write(output_evidence.text_data.encode('utf8'))
- fh.write('\n'.encode('utf8'))
+ with open(output_file_path, 'w', encoding='utf-8') as fh:
+ fh.write(output_evidence.text_data)
+ fh.write('\n')
# Add the resulting evidence to the result object.
result.add_evidence(output_evidence, evidence.config)
|
{"golden_diff": "diff --git a/turbinia/workers/analysis/jupyter.py b/turbinia/workers/analysis/jupyter.py\n--- a/turbinia/workers/analysis/jupyter.py\n+++ b/turbinia/workers/analysis/jupyter.py\n@@ -61,9 +61,9 @@\n result.report_data = report\n \n # Write the report to the output file.\n- with open(output_file_path, 'w') as fh:\n- fh.write(output_evidence.text_data.encode('utf8'))\n- fh.write('\\n'.encode('utf8'))\n+ with open(output_file_path, 'w', encoding='utf-8') as fh:\n+ fh.write(output_evidence.text_data)\n+ fh.write('\\n')\n \n # Add the resulting evidence to the result object.\n result.add_evidence(output_evidence, evidence.config)\n", "issue": "Jupyter FileExtractionTask task fails\n```\r\n2021-02-25 17:17:55 [INFO] Execution of [['sudo', 'image_export.py', '--logfile', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/4a7632dfe76c428797ce0ddeafc684bf.log', '-w', '/evidence/1614273470-4a7632dfe76c428797ce0ddeafc684bf-FileArtifactExtractionTask/export', '--partitions', 'all', '--artifact_filters', 'JupyterConfigFile', '-d', '/dev/loop1']] failed with status 1\r\n2021-02-25 17:17:55 [INFO] image_export.py failed for artifact JupyterConfigFile.\r\n```\r\n\r\nhttps://github.com/google/turbinia/pull/766/checks?check_run_id=1981113701#step:9:842\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2020 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task for analysing Jupyter.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport re\n\nfrom turbinia import TurbiniaException\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import ReportText\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.workers import Priority\n\n\nclass JupyterAnalysisTask(TurbiniaTask):\n \"\"\"Task to analyze a Jupyter Notebook config.\"\"\"\n\n REQUIRED_STATES = [state.ATTACHED, state.MOUNTED]\n\n def run(self, evidence, result):\n \"\"\"Run the Jupyter worker.\n\n Args:\n evidence (Evidence object): The evidence to process\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n\n # Where to store the resulting output file.\n output_file_name = 'jupyter_analysis.txt'\n output_file_path = os.path.join(self.output_dir, output_file_name)\n\n # What type of evidence we should output.\n output_evidence = ReportText(source_path=output_file_path)\n\n # Read the config file.\n\n jupyter_config = open(evidence.local_path, 'r').read()\n\n # Extract the config and return the report\n (report, priority, summary) = self.analyse_config(jupyter_config)\n output_evidence.text_data = report\n result.report_priority = priority\n result.report_data = report\n\n # Write the report to the output file.\n with open(output_file_path, 'w') as fh:\n fh.write(output_evidence.text_data.encode('utf8'))\n fh.write('\\n'.encode('utf8'))\n\n # Add the resulting evidence to the result object.\n 
result.add_evidence(output_evidence, evidence.config)\n result.close(self, success=True, status=summary)\n\n return result\n\n def analyse_config(self, jupyter_config):\n \"\"\"Extract security related configs from Jupyter configuration files.\n\n Args:\n config (str): configuration file content.\n\n Returns:\n Tuple(\n report_text(str): The report data\n report_priority(int): The priority of the report (0 - 100)\n summary(str): A summary of the report (used for task status)\n )\n \"\"\"\n findings = []\n num_misconfigs = 0\n for line in jupyter_config.split('\\n'):\n\n if all(x in line for x in ['disable_check_xsrf', 'True']):\n findings.append(fmt.bullet('XSRF protection is disabled.'))\n num_misconfigs += 1\n continue\n if all(x in line for x in ['allow_root', 'True']):\n findings.append(fmt.bullet('Juypter Notebook allowed to run as root.'))\n num_misconfigs += 1\n continue\n if 'NotebookApp.password' in line:\n if all(x in line for x in ['required', 'False']):\n findings.append(\n fmt.bullet(\n 'Password is not required to access this Jupyter Notebook.'))\n num_misconfigs += 1\n continue\n if 'required' not in line:\n password_hash = line.split('=')\n if len(password_hash) > 1:\n if password_hash[1].strip() == \"''\":\n findings.append(\n fmt.bullet(\n 'There is no password set for this Jupyter Notebook.'))\n num_misconfigs += 1\n if all(x in line for x in ['allow_remote_access', 'True']):\n findings.append(\n fmt.bullet('Remote access is enabled on this Jupyter Notebook.'))\n num_misconfigs += 1\n continue\n\n if findings:\n summary = 'Insecure Jupyter Notebook configuration found. Total misconfigs: {}'.format(\n num_misconfigs)\n findings.insert(0, fmt.heading4(fmt.bold(summary)))\n report = '\\n'.join(findings)\n return (report, Priority.HIGH, summary)\n\n report = 'No issues found in Jupyter Notebook configuration.'\n return (report, Priority.LOW, report)\n", "path": "turbinia/workers/analysis/jupyter.py"}]}
| 2,140 | 186 |
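The golden diff replaces writing encoded bytes into a text-mode handle (which raises `TypeError: write() argument must be str, not bytes` on Python 3) with declaring the encoding on `open()`. A minimal sketch of the before/after pattern, with illustrative names:

```python
report = "Insecure Jupyter Notebook configuration found."

# Old pattern: text-mode handle + bytes -> TypeError on Python 3.
# with open("jupyter_analysis.txt", "w") as fh:
#     fh.write(report.encode("utf8"))

# Patched pattern: declare the encoding on open() and write str directly.
with open("jupyter_analysis.txt", "w", encoding="utf-8") as fh:
    fh.write(report)
    fh.write("\n")
```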
gh_patches_debug_33837
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-9248
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gravatar,initials doesn't fallback when gravatar.com is unreachable
**Describe the bug**
On System -> Settings -> Avatar section, there is this blurb of text:
`Multiple values can be set, comma-separated, and authentik will fallback to the next mode when no avatar could be found. For example, setting this to gravatar,initials will attempt to get an avatar from Gravatar, and if the user has not configured on there, it will fallback to a generated avatar.`
However, if authentik is deployed in an environment that doesn't allow a network connection to gravatar.com, i.e. firewall or similar, even the setting "gravatar,initials" leaves each user's avatar as a broken thumbnail, and initials are never retrieved. Further, each GET of a user ends up being really slow. i.e. retrieving 150k users by paginating through /core/users/ takes 12 hours.
Once changing the setting to "initials" only, retrieving 150k users takes about 2 minutes, and users have proper initials avatars.
**To Reproduce**
Set global Avatar setting to gravatar,initials and block outgoing connections to gravatar.com
**Expected behavior**
Fall back to using the initials setting
**Version and Deployment (please complete the following information):**
- authentik version: [e.g. 2024.2.2]
- Deployment: [e.g. custom helm]
</issue>
<code>
[start of authentik/lib/avatars.py]
1 """Avatar utils"""
2
3 from base64 import b64encode
4 from functools import cache as funccache
5 from hashlib import md5
6 from typing import TYPE_CHECKING
7 from urllib.parse import urlencode
8
9 from django.core.cache import cache
10 from django.http import HttpRequest, HttpResponseNotFound
11 from django.templatetags.static import static
12 from lxml import etree # nosec
13 from lxml.etree import Element, SubElement # nosec
14 from requests.exceptions import RequestException
15
16 from authentik.lib.config import get_path_from_dict
17 from authentik.lib.utils.http import get_http_session
18 from authentik.tenants.utils import get_current_tenant
19
20 if TYPE_CHECKING:
21 from authentik.core.models import User
22
23 GRAVATAR_URL = "https://secure.gravatar.com"
24 DEFAULT_AVATAR = static("dist/assets/images/user_default.png")
25 CACHE_KEY_GRAVATAR = "goauthentik.io/lib/avatars/"
26
27 SVG_XML_NS = "http://www.w3.org/2000/svg"
28 SVG_NS_MAP = {None: SVG_XML_NS}
29 # Match fonts used in web UI
30 SVG_FONTS = [
31 "'RedHatText'",
32 "'Overpass'",
33 "overpass",
34 "helvetica",
35 "arial",
36 "sans-serif",
37 ]
38
39
40 def avatar_mode_none(user: "User", mode: str) -> str | None:
41 """No avatar"""
42 return DEFAULT_AVATAR
43
44
45 def avatar_mode_attribute(user: "User", mode: str) -> str | None:
46 """Avatars based on a user attribute"""
47 avatar = get_path_from_dict(user.attributes, mode[11:], default=None)
48 return avatar
49
50
51 def avatar_mode_gravatar(user: "User", mode: str) -> str | None:
52 """Gravatar avatars"""
53 # gravatar uses md5 for their URLs, so md5 can't be avoided
54 mail_hash = md5(user.email.lower().encode("utf-8")).hexdigest() # nosec
55 parameters = [("size", "158"), ("rating", "g"), ("default", "404")]
56 gravatar_url = f"{GRAVATAR_URL}/avatar/{mail_hash}?{urlencode(parameters, doseq=True)}"
57
58 full_key = CACHE_KEY_GRAVATAR + mail_hash
59 if cache.has_key(full_key):
60 cache.touch(full_key)
61 return cache.get(full_key)
62
63 try:
64 # Since we specify a default of 404, do a HEAD request
65 # (HEAD since we don't need the body)
66 # so if that returns a 404, move onto the next mode
67 res = get_http_session().head(gravatar_url, timeout=5)
68 if res.status_code == HttpResponseNotFound.status_code:
69 cache.set(full_key, None)
70 return None
71 res.raise_for_status()
72 except RequestException:
73 return gravatar_url
74 cache.set(full_key, gravatar_url)
75 return gravatar_url
76
77
78 def generate_colors(text: str) -> tuple[str, str]:
79 """Generate colours based on `text`"""
80 color = int(md5(text.lower().encode("utf-8")).hexdigest(), 16) % 0xFFFFFF # nosec
81
82 # Get a (somewhat arbitrarily) reduced scope of colors
83 # to avoid too dark or light backgrounds
84 blue = min(max((color) & 0xFF, 55), 200)
85 green = min(max((color >> 8) & 0xFF, 55), 200)
86 red = min(max((color >> 16) & 0xFF, 55), 200)
87 bg_hex = f"{red:02x}{green:02x}{blue:02x}"
88 # Contrasting text color (https://stackoverflow.com/a/3943023)
89 text_hex = (
90 "000" if (red * 0.299 + green * 0.587 + blue * 0.114) > 186 else "fff" # noqa: PLR2004
91 )
92 return bg_hex, text_hex
93
94
95 @funccache
96 def generate_avatar_from_name(
97 name: str,
98 length: int = 2,
99 size: int = 64,
100 rounded: bool = False,
101 font_size: float = 0.4375,
102 bold: bool = False,
103 uppercase: bool = True,
104 ) -> str:
105 """ "Generate an avatar with initials in SVG format.
106
107 Inspired from: https://github.com/LasseRafn/ui-avatars
108 """
109 name_parts = name.split()
110 # Only abbreviate first and last name
111 if len(name_parts) > 2: # noqa: PLR2004
112 name_parts = [name_parts[0], name_parts[-1]]
113
114 if len(name_parts) == 1:
115 initials = name_parts[0][:length]
116 else:
117 initials = "".join([part[0] for part in name_parts[:-1]])
118 initials += name_parts[-1]
119 initials = initials[:length]
120
121 bg_hex, text_hex = generate_colors(name)
122
123 half_size = size // 2
124 shape = "circle" if rounded else "rect"
125 font_weight = "600" if bold else "400"
126
127 root_element: Element = Element(f"{{{SVG_XML_NS}}}svg", nsmap=SVG_NS_MAP)
128 root_element.attrib["width"] = f"{size}px"
129 root_element.attrib["height"] = f"{size}px"
130 root_element.attrib["viewBox"] = f"0 0 {size} {size}"
131 root_element.attrib["version"] = "1.1"
132
133 shape = SubElement(root_element, f"{{{SVG_XML_NS}}}{shape}", nsmap=SVG_NS_MAP)
134 shape.attrib["fill"] = f"#{bg_hex}"
135 shape.attrib["cx"] = f"{half_size}"
136 shape.attrib["cy"] = f"{half_size}"
137 shape.attrib["width"] = f"{size}"
138 shape.attrib["height"] = f"{size}"
139 shape.attrib["r"] = f"{half_size}"
140
141 text = SubElement(root_element, f"{{{SVG_XML_NS}}}text", nsmap=SVG_NS_MAP)
142 text.attrib["x"] = "50%"
143 text.attrib["y"] = "50%"
144 text.attrib["style"] = (
145 f"color: #{text_hex}; " "line-height: 1; " f"font-family: {','.join(SVG_FONTS)}; "
146 )
147 text.attrib["fill"] = f"#{text_hex}"
148 text.attrib["alignment-baseline"] = "middle"
149 text.attrib["dominant-baseline"] = "middle"
150 text.attrib["text-anchor"] = "middle"
151 text.attrib["font-size"] = f"{round(size * font_size)}"
152 text.attrib["font-weight"] = f"{font_weight}"
153 text.attrib["dy"] = ".1em"
154 text.text = initials if not uppercase else initials.upper()
155
156 return etree.tostring(root_element).decode()
157
158
159 def avatar_mode_generated(user: "User", mode: str) -> str | None:
160 """Wrapper that converts generated avatar to base64 svg"""
161 # By default generate based off of user's display name
162 name = user.name.strip()
163 if name == "":
164 # Fallback to username
165 name = user.username.strip()
166 # If we still don't have anything, fallback to `a k`
167 if name == "":
168 name = "a k"
169 svg = generate_avatar_from_name(name)
170 return f"data:image/svg+xml;base64,{b64encode(svg.encode('utf-8')).decode('utf-8')}"
171
172
173 def avatar_mode_url(user: "User", mode: str) -> str | None:
174 """Format url"""
175 mail_hash = md5(user.email.lower().encode("utf-8")).hexdigest() # nosec
176 return mode % {
177 "username": user.username,
178 "mail_hash": mail_hash,
179 "upn": user.attributes.get("upn", ""),
180 }
181
182
183 def get_avatar(user: "User", request: HttpRequest | None = None) -> str:
184 """Get avatar with configured mode"""
185 mode_map = {
186 "none": avatar_mode_none,
187 "initials": avatar_mode_generated,
188 "gravatar": avatar_mode_gravatar,
189 }
190 tenant = None
191 if request:
192 tenant = request.tenant
193 else:
194 tenant = get_current_tenant()
195 modes: str = tenant.avatars
196 for mode in modes.split(","):
197 avatar = None
198 if mode in mode_map:
199 avatar = mode_map[mode](user, mode)
200 elif mode.startswith("attributes."):
201 avatar = avatar_mode_attribute(user, mode)
202 elif "://" in mode:
203 avatar = avatar_mode_url(user, mode)
204 if avatar:
205 return avatar
206 return avatar_mode_none(user, modes)
207
[end of authentik/lib/avatars.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/authentik/lib/avatars.py b/authentik/lib/avatars.py
--- a/authentik/lib/avatars.py
+++ b/authentik/lib/avatars.py
@@ -11,7 +11,7 @@
from django.templatetags.static import static
from lxml import etree # nosec
from lxml.etree import Element, SubElement # nosec
-from requests.exceptions import RequestException
+from requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout
from authentik.lib.config import get_path_from_dict
from authentik.lib.utils.http import get_http_session
@@ -23,6 +23,8 @@
GRAVATAR_URL = "https://secure.gravatar.com"
DEFAULT_AVATAR = static("dist/assets/images/user_default.png")
CACHE_KEY_GRAVATAR = "goauthentik.io/lib/avatars/"
+CACHE_KEY_GRAVATAR_AVAILABLE = "goauthentik.io/lib/avatars/gravatar_available"
+GRAVATAR_STATUS_TTL_SECONDS = 60 * 60 * 8 # 8 Hours
SVG_XML_NS = "http://www.w3.org/2000/svg"
SVG_NS_MAP = {None: SVG_XML_NS}
@@ -50,6 +52,9 @@
def avatar_mode_gravatar(user: "User", mode: str) -> str | None:
"""Gravatar avatars"""
+ if not cache.get(CACHE_KEY_GRAVATAR_AVAILABLE, True):
+ return None
+
# gravatar uses md5 for their URLs, so md5 can't be avoided
mail_hash = md5(user.email.lower().encode("utf-8")).hexdigest() # nosec
parameters = [("size", "158"), ("rating", "g"), ("default", "404")]
@@ -69,6 +74,8 @@
cache.set(full_key, None)
return None
res.raise_for_status()
+ except (Timeout, ConnectionError, HTTPError):
+ cache.set(CACHE_KEY_GRAVATAR_AVAILABLE, False, timeout=GRAVATAR_STATUS_TTL_SECONDS)
except RequestException:
return gravatar_url
cache.set(full_key, gravatar_url)
|
{"golden_diff": "diff --git a/authentik/lib/avatars.py b/authentik/lib/avatars.py\n--- a/authentik/lib/avatars.py\n+++ b/authentik/lib/avatars.py\n@@ -11,7 +11,7 @@\n from django.templatetags.static import static\n from lxml import etree # nosec\n from lxml.etree import Element, SubElement # nosec\n-from requests.exceptions import RequestException\n+from requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout\n \n from authentik.lib.config import get_path_from_dict\n from authentik.lib.utils.http import get_http_session\n@@ -23,6 +23,8 @@\n GRAVATAR_URL = \"https://secure.gravatar.com\"\n DEFAULT_AVATAR = static(\"dist/assets/images/user_default.png\")\n CACHE_KEY_GRAVATAR = \"goauthentik.io/lib/avatars/\"\n+CACHE_KEY_GRAVATAR_AVAILABLE = \"goauthentik.io/lib/avatars/gravatar_available\"\n+GRAVATAR_STATUS_TTL_SECONDS = 60 * 60 * 8 # 8 Hours\n \n SVG_XML_NS = \"http://www.w3.org/2000/svg\"\n SVG_NS_MAP = {None: SVG_XML_NS}\n@@ -50,6 +52,9 @@\n \n def avatar_mode_gravatar(user: \"User\", mode: str) -> str | None:\n \"\"\"Gravatar avatars\"\"\"\n+ if not cache.get(CACHE_KEY_GRAVATAR_AVAILABLE, True):\n+ return None\n+\n # gravatar uses md5 for their URLs, so md5 can't be avoided\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n parameters = [(\"size\", \"158\"), (\"rating\", \"g\"), (\"default\", \"404\")]\n@@ -69,6 +74,8 @@\n cache.set(full_key, None)\n return None\n res.raise_for_status()\n+ except (Timeout, ConnectionError, HTTPError):\n+ cache.set(CACHE_KEY_GRAVATAR_AVAILABLE, False, timeout=GRAVATAR_STATUS_TTL_SECONDS)\n except RequestException:\n return gravatar_url\n cache.set(full_key, gravatar_url)\n", "issue": "gravatar,initials doesn't fallback when gravatar.com is unreachable\n**Describe the bug**\r\nOn System -> Settings -> Avatar section, there is this blurb of text:\r\n`Multiple values can be set, comma-separated, and authentik will fallback to the next mode when no avatar could be found. For example, setting this to gravatar,initials will attempt to get an avatar from Gravatar, and if the user has not configured on there, it will fallback to a generated avatar.`\r\n\r\nHowever, if authentik is deployed in an environment that doesn't allow a network connection to gravatar.com, i.e. firewall or similar, even the setting \"gravatar,initials\" leaves each user's avatar as a broken thumbnail, and initials are never retrieved. Further, each GET of a user ends up being really slow. i.e. retrieving 150k users by paginating through /core/users/ takes 12 hours. \r\n\r\nOnce changing the setting to \"initials\" only, retrieving 150k users takes about 2 minutes, and users have proper initials avatars.\r\n\r\n**To Reproduce**\r\nSet global Avatar setting to gravatar,initials and block outgoing connections to gravatar.com\r\n\r\n**Expected behavior**\r\nFailback to using initials setting\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: [e.g. 2024.2.2]\r\n- Deployment: [e.g. 
custom helm]\r\n\n", "before_files": [{"content": "\"\"\"Avatar utils\"\"\"\n\nfrom base64 import b64encode\nfrom functools import cache as funccache\nfrom hashlib import md5\nfrom typing import TYPE_CHECKING\nfrom urllib.parse import urlencode\n\nfrom django.core.cache import cache\nfrom django.http import HttpRequest, HttpResponseNotFound\nfrom django.templatetags.static import static\nfrom lxml import etree # nosec\nfrom lxml.etree import Element, SubElement # nosec\nfrom requests.exceptions import RequestException\n\nfrom authentik.lib.config import get_path_from_dict\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.tenants.utils import get_current_tenant\n\nif TYPE_CHECKING:\n from authentik.core.models import User\n\nGRAVATAR_URL = \"https://secure.gravatar.com\"\nDEFAULT_AVATAR = static(\"dist/assets/images/user_default.png\")\nCACHE_KEY_GRAVATAR = \"goauthentik.io/lib/avatars/\"\n\nSVG_XML_NS = \"http://www.w3.org/2000/svg\"\nSVG_NS_MAP = {None: SVG_XML_NS}\n# Match fonts used in web UI\nSVG_FONTS = [\n \"'RedHatText'\",\n \"'Overpass'\",\n \"overpass\",\n \"helvetica\",\n \"arial\",\n \"sans-serif\",\n]\n\n\ndef avatar_mode_none(user: \"User\", mode: str) -> str | None:\n \"\"\"No avatar\"\"\"\n return DEFAULT_AVATAR\n\n\ndef avatar_mode_attribute(user: \"User\", mode: str) -> str | None:\n \"\"\"Avatars based on a user attribute\"\"\"\n avatar = get_path_from_dict(user.attributes, mode[11:], default=None)\n return avatar\n\n\ndef avatar_mode_gravatar(user: \"User\", mode: str) -> str | None:\n \"\"\"Gravatar avatars\"\"\"\n # gravatar uses md5 for their URLs, so md5 can't be avoided\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n parameters = [(\"size\", \"158\"), (\"rating\", \"g\"), (\"default\", \"404\")]\n gravatar_url = f\"{GRAVATAR_URL}/avatar/{mail_hash}?{urlencode(parameters, doseq=True)}\"\n\n full_key = CACHE_KEY_GRAVATAR + mail_hash\n if cache.has_key(full_key):\n cache.touch(full_key)\n return cache.get(full_key)\n\n try:\n # Since we specify a default of 404, do a HEAD request\n # (HEAD since we don't need the body)\n # so if that returns a 404, move onto the next mode\n res = get_http_session().head(gravatar_url, timeout=5)\n if res.status_code == HttpResponseNotFound.status_code:\n cache.set(full_key, None)\n return None\n res.raise_for_status()\n except RequestException:\n return gravatar_url\n cache.set(full_key, gravatar_url)\n return gravatar_url\n\n\ndef generate_colors(text: str) -> tuple[str, str]:\n \"\"\"Generate colours based on `text`\"\"\"\n color = int(md5(text.lower().encode(\"utf-8\")).hexdigest(), 16) % 0xFFFFFF # nosec\n\n # Get a (somewhat arbitrarily) reduced scope of colors\n # to avoid too dark or light backgrounds\n blue = min(max((color) & 0xFF, 55), 200)\n green = min(max((color >> 8) & 0xFF, 55), 200)\n red = min(max((color >> 16) & 0xFF, 55), 200)\n bg_hex = f\"{red:02x}{green:02x}{blue:02x}\"\n # Contrasting text color (https://stackoverflow.com/a/3943023)\n text_hex = (\n \"000\" if (red * 0.299 + green * 0.587 + blue * 0.114) > 186 else \"fff\" # noqa: PLR2004\n )\n return bg_hex, text_hex\n\n\n@funccache\ndef generate_avatar_from_name(\n name: str,\n length: int = 2,\n size: int = 64,\n rounded: bool = False,\n font_size: float = 0.4375,\n bold: bool = False,\n uppercase: bool = True,\n) -> str:\n \"\"\" \"Generate an avatar with initials in SVG format.\n\n Inspired from: https://github.com/LasseRafn/ui-avatars\n \"\"\"\n name_parts = name.split()\n # Only abbreviate first and last 
name\n if len(name_parts) > 2: # noqa: PLR2004\n name_parts = [name_parts[0], name_parts[-1]]\n\n if len(name_parts) == 1:\n initials = name_parts[0][:length]\n else:\n initials = \"\".join([part[0] for part in name_parts[:-1]])\n initials += name_parts[-1]\n initials = initials[:length]\n\n bg_hex, text_hex = generate_colors(name)\n\n half_size = size // 2\n shape = \"circle\" if rounded else \"rect\"\n font_weight = \"600\" if bold else \"400\"\n\n root_element: Element = Element(f\"{{{SVG_XML_NS}}}svg\", nsmap=SVG_NS_MAP)\n root_element.attrib[\"width\"] = f\"{size}px\"\n root_element.attrib[\"height\"] = f\"{size}px\"\n root_element.attrib[\"viewBox\"] = f\"0 0 {size} {size}\"\n root_element.attrib[\"version\"] = \"1.1\"\n\n shape = SubElement(root_element, f\"{{{SVG_XML_NS}}}{shape}\", nsmap=SVG_NS_MAP)\n shape.attrib[\"fill\"] = f\"#{bg_hex}\"\n shape.attrib[\"cx\"] = f\"{half_size}\"\n shape.attrib[\"cy\"] = f\"{half_size}\"\n shape.attrib[\"width\"] = f\"{size}\"\n shape.attrib[\"height\"] = f\"{size}\"\n shape.attrib[\"r\"] = f\"{half_size}\"\n\n text = SubElement(root_element, f\"{{{SVG_XML_NS}}}text\", nsmap=SVG_NS_MAP)\n text.attrib[\"x\"] = \"50%\"\n text.attrib[\"y\"] = \"50%\"\n text.attrib[\"style\"] = (\n f\"color: #{text_hex}; \" \"line-height: 1; \" f\"font-family: {','.join(SVG_FONTS)}; \"\n )\n text.attrib[\"fill\"] = f\"#{text_hex}\"\n text.attrib[\"alignment-baseline\"] = \"middle\"\n text.attrib[\"dominant-baseline\"] = \"middle\"\n text.attrib[\"text-anchor\"] = \"middle\"\n text.attrib[\"font-size\"] = f\"{round(size * font_size)}\"\n text.attrib[\"font-weight\"] = f\"{font_weight}\"\n text.attrib[\"dy\"] = \".1em\"\n text.text = initials if not uppercase else initials.upper()\n\n return etree.tostring(root_element).decode()\n\n\ndef avatar_mode_generated(user: \"User\", mode: str) -> str | None:\n \"\"\"Wrapper that converts generated avatar to base64 svg\"\"\"\n # By default generate based off of user's display name\n name = user.name.strip()\n if name == \"\":\n # Fallback to username\n name = user.username.strip()\n # If we still don't have anything, fallback to `a k`\n if name == \"\":\n name = \"a k\"\n svg = generate_avatar_from_name(name)\n return f\"data:image/svg+xml;base64,{b64encode(svg.encode('utf-8')).decode('utf-8')}\"\n\n\ndef avatar_mode_url(user: \"User\", mode: str) -> str | None:\n \"\"\"Format url\"\"\"\n mail_hash = md5(user.email.lower().encode(\"utf-8\")).hexdigest() # nosec\n return mode % {\n \"username\": user.username,\n \"mail_hash\": mail_hash,\n \"upn\": user.attributes.get(\"upn\", \"\"),\n }\n\n\ndef get_avatar(user: \"User\", request: HttpRequest | None = None) -> str:\n \"\"\"Get avatar with configured mode\"\"\"\n mode_map = {\n \"none\": avatar_mode_none,\n \"initials\": avatar_mode_generated,\n \"gravatar\": avatar_mode_gravatar,\n }\n tenant = None\n if request:\n tenant = request.tenant\n else:\n tenant = get_current_tenant()\n modes: str = tenant.avatars\n for mode in modes.split(\",\"):\n avatar = None\n if mode in mode_map:\n avatar = mode_map[mode](user, mode)\n elif mode.startswith(\"attributes.\"):\n avatar = avatar_mode_attribute(user, mode)\n elif \"://\" in mode:\n avatar = avatar_mode_url(user, mode)\n if avatar:\n return avatar\n return avatar_mode_none(user, modes)\n", "path": "authentik/lib/avatars.py"}]}
| 3,321 | 477 |
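Editor's note (not part of the dataset row above): the golden diff for the authentik avatars problem short-circuits Gravatar lookups once the service has been unreachable, which is what lets a "gravatar,initials" chain actually fall back instead of timing out on every user. The snippet below is a minimal, self-contained sketch of that pattern under stated assumptions: the module-level `_gravatar_down_until` variable stands in for authentik's shared Django cache key, the 8-hour TTL mirrors `GRAVATAR_STATUS_TTL_SECONDS` from the diff, and per-hash URL caching is omitted.

```python
from hashlib import md5
from time import monotonic

import requests
from requests.exceptions import ConnectionError, HTTPError, Timeout

GRAVATAR_URL = "https://secure.gravatar.com"
GRAVATAR_STATUS_TTL_SECONDS = 60 * 60 * 8  # same 8 h window as the diff above
_gravatar_down_until = 0.0  # stand-in for the shared Django cache key


def avatar_mode_gravatar(email: str) -> str | None:
    """Return a Gravatar URL, or None so the caller falls through to 'initials'."""
    global _gravatar_down_until
    if monotonic() < _gravatar_down_until:
        return None  # Gravatar recently marked unreachable: skip immediately
    mail_hash = md5(email.lower().encode("utf-8")).hexdigest()  # nosec
    gravatar_url = f"{GRAVATAR_URL}/avatar/{mail_hash}?size=158&default=404"
    try:
        res = requests.head(gravatar_url, timeout=5)
        if res.status_code == 404:
            return None  # user has no Gravatar; try the next configured mode
        res.raise_for_status()
    except (Timeout, ConnectionError, HTTPError):
        # Network blocked (firewall etc.): remember that for a while instead of
        # paying a 5 s timeout on every single user lookup.
        _gravatar_down_until = monotonic() + GRAVATAR_STATUS_TTL_SECONDS
        return None
    except requests.RequestException:
        return gravatar_url
    return gravatar_url
```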
gh_patches_debug_41396
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-6063
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multiple-compressed responses support
HTTP response body can be compressed multiple times. In such cases `Content-Encoding` header contains a list of comma-separated encodings. Some servers instead can send multiple `Content-Encoding` headers.
This fix allows HttpCompressionMiddleware handle that.
See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
Fix #5143
Fixes #5143
</issue>
<code>
[start of scrapy/downloadermiddlewares/httpcompression.py]
1 from __future__ import annotations
2
3 import warnings
4 from logging import getLogger
5 from typing import TYPE_CHECKING, List, Optional, Union
6
7 from scrapy import Request, Spider, signals
8 from scrapy.crawler import Crawler
9 from scrapy.exceptions import IgnoreRequest, NotConfigured
10 from scrapy.http import Response, TextResponse
11 from scrapy.responsetypes import responsetypes
12 from scrapy.statscollectors import StatsCollector
13 from scrapy.utils._compression import (
14 _DecompressionMaxSizeExceeded,
15 _inflate,
16 _unbrotli,
17 _unzstd,
18 )
19 from scrapy.utils.deprecate import ScrapyDeprecationWarning
20 from scrapy.utils.gz import gunzip
21
22 if TYPE_CHECKING:
23 # typing.Self requires Python 3.11
24 from typing_extensions import Self
25
26 logger = getLogger(__name__)
27
28 ACCEPTED_ENCODINGS: List[bytes] = [b"gzip", b"deflate"]
29
30 try:
31 import brotli # noqa: F401
32 except ImportError:
33 pass
34 else:
35 ACCEPTED_ENCODINGS.append(b"br")
36
37 try:
38 import zstandard # noqa: F401
39 except ImportError:
40 pass
41 else:
42 ACCEPTED_ENCODINGS.append(b"zstd")
43
44
45 class HttpCompressionMiddleware:
46 """This middleware allows compressed (gzip, deflate) traffic to be
47 sent/received from web sites"""
48
49 def __init__(
50 self,
51 stats: Optional[StatsCollector] = None,
52 *,
53 crawler: Optional[Crawler] = None,
54 ):
55 if not crawler:
56 self.stats = stats
57 self._max_size = 1073741824
58 self._warn_size = 33554432
59 return
60 self.stats = crawler.stats
61 self._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE")
62 self._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE")
63 crawler.signals.connect(self.open_spider, signals.spider_opened)
64
65 @classmethod
66 def from_crawler(cls, crawler: Crawler) -> Self:
67 if not crawler.settings.getbool("COMPRESSION_ENABLED"):
68 raise NotConfigured
69 try:
70 return cls(crawler=crawler)
71 except TypeError:
72 warnings.warn(
73 "HttpCompressionMiddleware subclasses must either modify "
74 "their '__init__' method to support a 'crawler' parameter or "
75 "reimplement their 'from_crawler' method.",
76 ScrapyDeprecationWarning,
77 )
78 mw = cls()
79 mw.stats = crawler.stats
80 mw._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE")
81 mw._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE")
82 crawler.signals.connect(mw.open_spider, signals.spider_opened)
83 return mw
84
85 def open_spider(self, spider):
86 if hasattr(spider, "download_maxsize"):
87 self._max_size = spider.download_maxsize
88 if hasattr(spider, "download_warnsize"):
89 self._warn_size = spider.download_warnsize
90
91 def process_request(
92 self, request: Request, spider: Spider
93 ) -> Union[Request, Response, None]:
94 request.headers.setdefault("Accept-Encoding", b", ".join(ACCEPTED_ENCODINGS))
95 return None
96
97 def process_response(
98 self, request: Request, response: Response, spider: Spider
99 ) -> Union[Request, Response]:
100 if request.method == "HEAD":
101 return response
102 if isinstance(response, Response):
103 content_encoding = response.headers.getlist("Content-Encoding")
104 if content_encoding:
105 encoding = content_encoding.pop()
106 max_size = request.meta.get("download_maxsize", self._max_size)
107 warn_size = request.meta.get("download_warnsize", self._warn_size)
108 try:
109 decoded_body = self._decode(
110 response.body, encoding.lower(), max_size
111 )
112 except _DecompressionMaxSizeExceeded:
113 raise IgnoreRequest(
114 f"Ignored response {response} because its body "
115 f"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE "
116 f"({max_size} B) during decompression."
117 )
118 if len(response.body) < warn_size <= len(decoded_body):
119 logger.warning(
120 f"{response} body size after decompression "
121 f"({len(decoded_body)} B) is larger than the "
122 f"download warning size ({warn_size} B)."
123 )
124 if self.stats:
125 self.stats.inc_value(
126 "httpcompression/response_bytes",
127 len(decoded_body),
128 spider=spider,
129 )
130 self.stats.inc_value(
131 "httpcompression/response_count", spider=spider
132 )
133 respcls = responsetypes.from_args(
134 headers=response.headers, url=response.url, body=decoded_body
135 )
136 kwargs = dict(cls=respcls, body=decoded_body)
137 if issubclass(respcls, TextResponse):
138 # force recalculating the encoding until we make sure the
139 # responsetypes guessing is reliable
140 kwargs["encoding"] = None
141 response = response.replace(**kwargs)
142 if not content_encoding:
143 del response.headers["Content-Encoding"]
144
145 return response
146
147 def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:
148 if encoding == b"gzip" or encoding == b"x-gzip":
149 return gunzip(body, max_size=max_size)
150 if encoding == b"deflate":
151 return _inflate(body, max_size=max_size)
152 if encoding == b"br" and b"br" in ACCEPTED_ENCODINGS:
153 return _unbrotli(body, max_size=max_size)
154 if encoding == b"zstd" and b"zstd" in ACCEPTED_ENCODINGS:
155 return _unzstd(body, max_size=max_size)
156 return body
157
[end of scrapy/downloadermiddlewares/httpcompression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py
--- a/scrapy/downloadermiddlewares/httpcompression.py
+++ b/scrapy/downloadermiddlewares/httpcompression.py
@@ -1,6 +1,7 @@
from __future__ import annotations
import warnings
+from itertools import chain
from logging import getLogger
from typing import TYPE_CHECKING, List, Optional, Union
@@ -102,18 +103,18 @@
if isinstance(response, Response):
content_encoding = response.headers.getlist("Content-Encoding")
if content_encoding:
- encoding = content_encoding.pop()
max_size = request.meta.get("download_maxsize", self._max_size)
warn_size = request.meta.get("download_warnsize", self._warn_size)
try:
- decoded_body = self._decode(
- response.body, encoding.lower(), max_size
+ decoded_body, content_encoding = self._handle_encoding(
+ response.body, content_encoding, max_size
)
except _DecompressionMaxSizeExceeded:
raise IgnoreRequest(
f"Ignored response {response} because its body "
- f"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE "
- f"({max_size} B) during decompression."
+ f"({len(response.body)} B compressed) exceeded "
+ f"DOWNLOAD_MAXSIZE ({max_size} B) during "
+ f"decompression."
)
if len(response.body) < warn_size <= len(decoded_body):
logger.warning(
@@ -121,6 +122,7 @@
f"({len(decoded_body)} B) is larger than the "
f"download warning size ({warn_size} B)."
)
+ response.headers["Content-Encoding"] = content_encoding
if self.stats:
self.stats.inc_value(
"httpcompression/response_bytes",
@@ -144,6 +146,28 @@
return response
+ def _handle_encoding(self, body, content_encoding, max_size):
+ to_decode, to_keep = self._split_encodings(content_encoding)
+ for encoding in to_decode:
+ body = self._decode(body, encoding, max_size)
+ return body, to_keep
+
+ def _split_encodings(self, content_encoding):
+ to_keep = [
+ encoding.strip().lower()
+ for encoding in chain.from_iterable(
+ encodings.split(b",") for encodings in content_encoding
+ )
+ ]
+ to_decode = []
+ while to_keep:
+ encoding = to_keep.pop()
+ if encoding not in ACCEPTED_ENCODINGS:
+ to_keep.append(encoding)
+ return to_decode, to_keep
+ to_decode.append(encoding)
+ return to_decode, to_keep
+
def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:
if encoding == b"gzip" or encoding == b"x-gzip":
return gunzip(body, max_size=max_size)
|
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py\n--- a/scrapy/downloadermiddlewares/httpcompression.py\n+++ b/scrapy/downloadermiddlewares/httpcompression.py\n@@ -1,6 +1,7 @@\n from __future__ import annotations\n \n import warnings\n+from itertools import chain\n from logging import getLogger\n from typing import TYPE_CHECKING, List, Optional, Union\n \n@@ -102,18 +103,18 @@\n if isinstance(response, Response):\n content_encoding = response.headers.getlist(\"Content-Encoding\")\n if content_encoding:\n- encoding = content_encoding.pop()\n max_size = request.meta.get(\"download_maxsize\", self._max_size)\n warn_size = request.meta.get(\"download_warnsize\", self._warn_size)\n try:\n- decoded_body = self._decode(\n- response.body, encoding.lower(), max_size\n+ decoded_body, content_encoding = self._handle_encoding(\n+ response.body, content_encoding, max_size\n )\n except _DecompressionMaxSizeExceeded:\n raise IgnoreRequest(\n f\"Ignored response {response} because its body \"\n- f\"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE \"\n- f\"({max_size} B) during decompression.\"\n+ f\"({len(response.body)} B compressed) exceeded \"\n+ f\"DOWNLOAD_MAXSIZE ({max_size} B) during \"\n+ f\"decompression.\"\n )\n if len(response.body) < warn_size <= len(decoded_body):\n logger.warning(\n@@ -121,6 +122,7 @@\n f\"({len(decoded_body)} B) is larger than the \"\n f\"download warning size ({warn_size} B).\"\n )\n+ response.headers[\"Content-Encoding\"] = content_encoding\n if self.stats:\n self.stats.inc_value(\n \"httpcompression/response_bytes\",\n@@ -144,6 +146,28 @@\n \n return response\n \n+ def _handle_encoding(self, body, content_encoding, max_size):\n+ to_decode, to_keep = self._split_encodings(content_encoding)\n+ for encoding in to_decode:\n+ body = self._decode(body, encoding, max_size)\n+ return body, to_keep\n+\n+ def _split_encodings(self, content_encoding):\n+ to_keep = [\n+ encoding.strip().lower()\n+ for encoding in chain.from_iterable(\n+ encodings.split(b\",\") for encodings in content_encoding\n+ )\n+ ]\n+ to_decode = []\n+ while to_keep:\n+ encoding = to_keep.pop()\n+ if encoding not in ACCEPTED_ENCODINGS:\n+ to_keep.append(encoding)\n+ return to_decode, to_keep\n+ to_decode.append(encoding)\n+ return to_decode, to_keep\n+\n def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:\n if encoding == b\"gzip\" or encoding == b\"x-gzip\":\n return gunzip(body, max_size=max_size)\n", "issue": "Multiple-compressed responses support\nHTTP response body can be compressed multiple times. In such cases `Content-Encoding` header contains a list of comma-separated encodings. 
Some servers instead can send multiple `Content-Encoding` headers.\r\nThis fix allows HttpCompressionMiddleware handle that.\r\nSee also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\nFix #5143\nFixes #5143 \n", "before_files": [{"content": "from __future__ import annotations\n\nimport warnings\nfrom logging import getLogger\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nfrom scrapy import Request, Spider, signals\nfrom scrapy.crawler import Crawler\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\nfrom scrapy.http import Response, TextResponse\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.statscollectors import StatsCollector\nfrom scrapy.utils._compression import (\n _DecompressionMaxSizeExceeded,\n _inflate,\n _unbrotli,\n _unzstd,\n)\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\nfrom scrapy.utils.gz import gunzip\n\nif TYPE_CHECKING:\n # typing.Self requires Python 3.11\n from typing_extensions import Self\n\nlogger = getLogger(__name__)\n\nACCEPTED_ENCODINGS: List[bytes] = [b\"gzip\", b\"deflate\"]\n\ntry:\n import brotli # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"br\")\n\ntry:\n import zstandard # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"zstd\")\n\n\nclass HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n\n def __init__(\n self,\n stats: Optional[StatsCollector] = None,\n *,\n crawler: Optional[Crawler] = None,\n ):\n if not crawler:\n self.stats = stats\n self._max_size = 1073741824\n self._warn_size = 33554432\n return\n self.stats = crawler.stats\n self._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n self._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(self.open_spider, signals.spider_opened)\n\n @classmethod\n def from_crawler(cls, crawler: Crawler) -> Self:\n if not crawler.settings.getbool(\"COMPRESSION_ENABLED\"):\n raise NotConfigured\n try:\n return cls(crawler=crawler)\n except TypeError:\n warnings.warn(\n \"HttpCompressionMiddleware subclasses must either modify \"\n \"their '__init__' method to support a 'crawler' parameter or \"\n \"reimplement their 'from_crawler' method.\",\n ScrapyDeprecationWarning,\n )\n mw = cls()\n mw.stats = crawler.stats\n mw._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n mw._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(mw.open_spider, signals.spider_opened)\n return mw\n\n def open_spider(self, spider):\n if hasattr(spider, \"download_maxsize\"):\n self._max_size = spider.download_maxsize\n if hasattr(spider, \"download_warnsize\"):\n self._warn_size = spider.download_warnsize\n\n def process_request(\n self, request: Request, spider: Spider\n ) -> Union[Request, Response, None]:\n request.headers.setdefault(\"Accept-Encoding\", b\", \".join(ACCEPTED_ENCODINGS))\n return None\n\n def process_response(\n self, request: Request, response: Response, spider: Spider\n ) -> Union[Request, Response]:\n if request.method == \"HEAD\":\n return response\n if isinstance(response, Response):\n content_encoding = response.headers.getlist(\"Content-Encoding\")\n if content_encoding:\n encoding = content_encoding.pop()\n max_size = request.meta.get(\"download_maxsize\", self._max_size)\n warn_size = request.meta.get(\"download_warnsize\", self._warn_size)\n try:\n decoded_body = self._decode(\n response.body, 
encoding.lower(), max_size\n )\n except _DecompressionMaxSizeExceeded:\n raise IgnoreRequest(\n f\"Ignored response {response} because its body \"\n f\"({len(response.body)} B) exceeded DOWNLOAD_MAXSIZE \"\n f\"({max_size} B) during decompression.\"\n )\n if len(response.body) < warn_size <= len(decoded_body):\n logger.warning(\n f\"{response} body size after decompression \"\n f\"({len(decoded_body)} B) is larger than the \"\n f\"download warning size ({warn_size} B).\"\n )\n if self.stats:\n self.stats.inc_value(\n \"httpcompression/response_bytes\",\n len(decoded_body),\n spider=spider,\n )\n self.stats.inc_value(\n \"httpcompression/response_count\", spider=spider\n )\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n kwargs = dict(cls=respcls, body=decoded_body)\n if issubclass(respcls, TextResponse):\n # force recalculating the encoding until we make sure the\n # responsetypes guessing is reliable\n kwargs[\"encoding\"] = None\n response = response.replace(**kwargs)\n if not content_encoding:\n del response.headers[\"Content-Encoding\"]\n\n return response\n\n def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:\n if encoding == b\"gzip\" or encoding == b\"x-gzip\":\n return gunzip(body, max_size=max_size)\n if encoding == b\"deflate\":\n return _inflate(body, max_size=max_size)\n if encoding == b\"br\" and b\"br\" in ACCEPTED_ENCODINGS:\n return _unbrotli(body, max_size=max_size)\n if encoding == b\"zstd\" and b\"zstd\" in ACCEPTED_ENCODINGS:\n return _unzstd(body, max_size=max_size)\n return body\n", "path": "scrapy/downloadermiddlewares/httpcompression.py"}]}
| 2,249 | 660 |
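Editor's note (not part of the dataset row above): the scrapy fix works by splitting the comma-separated (or repeated) `Content-Encoding` values and decoding from the outermost encoding inwards until it meets one it cannot handle. The sketch below loosely mirrors `_split_encodings` from the golden diff; the `ACCEPTED` set and function name are illustrative stand-ins, since the real accepted list depends on which optional compression libraries are installed.

```python
from itertools import chain

# Illustrative stand-in for scrapy's ACCEPTED_ENCODINGS.
ACCEPTED = {b"gzip", b"x-gzip", b"deflate", b"br", b"zstd"}


def split_encodings(content_encoding_headers):
    # Flatten comma-separated values from one or more Content-Encoding headers,
    # e.g. [b"gzip, br"] -> [b"gzip", b"br"], normalising case and whitespace.
    encodings = [
        enc.strip().lower()
        for enc in chain.from_iterable(h.split(b",") for h in content_encoding_headers)
    ]
    # Decode from the outermost (last applied) encoding inwards and stop at the
    # first one we cannot handle; whatever is left stays on the response header.
    to_decode, to_keep = [], encodings
    while to_keep:
        enc = to_keep.pop()
        if enc not in ACCEPTED:
            to_keep.append(enc)
            break
        to_decode.append(enc)
    return to_decode, to_keep


print(split_encodings([b"gzip, br"]))           # ([b'br', b'gzip'], [])
print(split_encodings([b"identity", b"gzip"]))  # ([b'gzip'], [b'identity'])
```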
gh_patches_debug_31051
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-441
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add build completion to buildroot override add
From: https://fedorahosted.org/bodhi/ticket/788
"Small regression in Bodhi 2.0: builds completion fails to work in the override page.
I tried to submit an override for qpid-cpp-0.32-8.fc23 (due to boost rebuild induced ABI break), and it failed in both discovering the name of the source package and the nvr."
</issue>
<code>
[start of bodhi/views/generic.py]
1 # This program is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU General Public License
3 # as published by the Free Software Foundation; either version 2
4 # of the License, or (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10 #
11 # You should have received a copy of the GNU General Public License
12 # along with this program; if not, write to the Free Software
13 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14
15 import datetime
16 import sqlalchemy as sa
17
18 from pyramid.security import authenticated_userid
19 from pyramid.view import view_config, notfound_view_config
20 from pyramid.exceptions import HTTPNotFound, HTTPForbidden
21
22 from bodhi import log
23 import bodhi.models
24 import bodhi.util
25
26
27 @notfound_view_config(append_slash=True)
28 def notfound_view(context, request):
29 """ Automatically redirects to slash-appended routes.
30
31 http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/urldispatch.html#redirecting-to-slash-appended-rou
32 """
33 return HTTPNotFound()
34
35
36 def get_top_testers(request):
37 db = request.db
38 blacklist = request.registry.settings.get('stats_blacklist').split()
39 days = int(request.registry.settings.get('top_testers_timeframe', 7))
40 start_time = datetime.datetime.utcnow() - datetime.timedelta(days=days)
41
42 query = db.query(
43 bodhi.models.User,
44 sa.func.count(bodhi.models.User.comments).label('count_1')
45 ).join(bodhi.models.Comment)
46 query = query\
47 .order_by('count_1 desc')\
48 .filter(bodhi.models.Comment.timestamp > start_time)
49
50 for user in blacklist:
51 query = query.filter(bodhi.models.User.name != user)
52
53 return query\
54 .group_by(bodhi.models.User)\
55 .limit(5)\
56 .all()
57
58
59 def get_latest_updates(request, critpath, security):
60 db = request.db
61 query = db.query(bodhi.models.Update)
62
63 if critpath:
64 query = query.filter(
65 bodhi.models.Update.critpath==True)
66 if security:
67 query = query.filter(
68 bodhi.models.Update.type==bodhi.models.UpdateType.security)
69
70 query = query.filter(
71 bodhi.models.Update.status==bodhi.models.UpdateStatus.testing)
72
73 query = query.order_by(bodhi.models.Update.date_submitted.desc())
74 return query.limit(5).all()
75
76
77 @view_config(route_name='home', renderer='home.html')
78 def home(request):
79 """ Returns data for the frontpage """
80 r = request
81
82 @request.cache.cache_on_arguments()
83 def work():
84 top_testers = get_top_testers(request)
85 critpath_updates = get_latest_updates(request, True, False)
86 security_updates = get_latest_updates(request, False, True)
87
88 return {
89 "top_testers": [(obj.__json__(r), n) for obj, n in top_testers],
90 "critpath_updates": [obj.__json__(r) for obj in critpath_updates],
91 "security_updates": [obj.__json__(r) for obj in security_updates],
92 }
93
94 return work()
95
96
97 @view_config(route_name='new_update', renderer='new_update.html')
98 def new_update(request):
99 """ Returns the new update form """
100 user = authenticated_userid(request)
101 if not user:
102 raise HTTPForbidden("You must be logged in.")
103 return dict(
104 update=None,
105 types=reversed(bodhi.models.UpdateType.values()),
106 severities=reversed(bodhi.models.UpdateSeverity.values()),
107 suggestions=reversed(bodhi.models.UpdateSuggestion.values()),
108 )
109
110
111 @view_config(route_name='latest_candidates', renderer='json')
112 def latest_candidates(request):
113 """
114 For a given `package`, this method returns the most recent builds tagged
115 into the Release.candidate_tag for all Releases.
116 """
117 koji = request.koji
118 db = request.db
119
120 @request.cache.cache_on_arguments()
121 def work(pkg):
122 result = []
123 koji.multicall = True
124
125 releases = db.query(bodhi.models.Release) \
126 .filter(
127 bodhi.models.Release.state.in_(
128 (bodhi.models.ReleaseState.pending,
129 bodhi.models.ReleaseState.current)))
130
131 for release in releases:
132 koji.listTagged(release.candidate_tag, package=pkg, latest=True)
133
134 builds = koji.multiCall() or [] # Protect against None
135
136 for build in builds:
137 if isinstance(build, dict):
138 continue
139 if build and build[0] and build[0][0]:
140 result.append({
141 'nvr': build[0][0]['nvr'],
142 'id': build[0][0]['id'],
143 })
144 return result
145
146
147 pkg = request.params.get('package')
148 log.debug('latest_candidate(%r)' % pkg)
149
150 if not pkg:
151 return []
152
153 result = work(pkg)
154
155 log.debug(result)
156 return result
157
158
159 @view_config(route_name='latest_builds', renderer='json')
160 def latest_builds(request):
161 """ Get a list of the latest builds for a given package.
162
163 Returns a dictionary of the release dist tag to the latest build.
164 """
165 builds = {}
166 koji = request.koji
167 package = request.params.get('package')
168 for tag_type, tags in bodhi.models.Release.get_tags()[0].iteritems():
169 for tag in tags:
170 try:
171 for build in koji.getLatestBuilds(tag, package=package):
172 builds[tag] = build['nvr']
173 except: # Things like EPEL don't have pending tags
174 pass
175 return builds
176
177
178 @view_config(route_name='new_override', renderer='override.html')
179 def new_override(request):
180 """ Returns the new buildroot override form """
181 user = authenticated_userid(request)
182 if not user:
183 raise HTTPForbidden("You must be logged in.")
184 return dict()
185
186
187 @view_config(route_name='api_version', renderer='json')
188 def api_version(request):
189 """ Returns the Bodhi API version """
190 return dict(version=bodhi.util.version())
191
[end of bodhi/views/generic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bodhi/views/generic.py b/bodhi/views/generic.py
--- a/bodhi/views/generic.py
+++ b/bodhi/views/generic.py
@@ -16,6 +16,7 @@
import sqlalchemy as sa
from pyramid.security import authenticated_userid
+from pyramid.settings import asbool
from pyramid.view import view_config, notfound_view_config
from pyramid.exceptions import HTTPNotFound, HTTPForbidden
@@ -118,7 +119,7 @@
db = request.db
@request.cache.cache_on_arguments()
- def work(pkg):
+ def work(pkg, testing):
result = []
koji.multicall = True
@@ -128,8 +129,12 @@
(bodhi.models.ReleaseState.pending,
bodhi.models.ReleaseState.current)))
+ kwargs = dict(package=pkg, latest=True)
for release in releases:
- koji.listTagged(release.candidate_tag, package=pkg, latest=True)
+ koji.listTagged(release.candidate_tag, **kwargs)
+ if testing:
+ koji.listTagged(release.testing_tag, **kwargs)
+ koji.listTagged(release.pending_testing_tag, **kwargs)
builds = koji.multiCall() or [] # Protect against None
@@ -145,12 +150,13 @@
pkg = request.params.get('package')
- log.debug('latest_candidate(%r)' % pkg)
+ testing = asbool(request.params.get('testing'))
+ log.debug('latest_candidate(%r, %r)' % (pkg, testing))
if not pkg:
return []
- result = work(pkg)
+ result = work(pkg, testing)
log.debug(result)
return result
|
{"golden_diff": "diff --git a/bodhi/views/generic.py b/bodhi/views/generic.py\n--- a/bodhi/views/generic.py\n+++ b/bodhi/views/generic.py\n@@ -16,6 +16,7 @@\n import sqlalchemy as sa\n \n from pyramid.security import authenticated_userid\n+from pyramid.settings import asbool\n from pyramid.view import view_config, notfound_view_config\n from pyramid.exceptions import HTTPNotFound, HTTPForbidden\n \n@@ -118,7 +119,7 @@\n db = request.db\n \n @request.cache.cache_on_arguments()\n- def work(pkg):\n+ def work(pkg, testing):\n result = []\n koji.multicall = True\n \n@@ -128,8 +129,12 @@\n (bodhi.models.ReleaseState.pending,\n bodhi.models.ReleaseState.current)))\n \n+ kwargs = dict(package=pkg, latest=True)\n for release in releases:\n- koji.listTagged(release.candidate_tag, package=pkg, latest=True)\n+ koji.listTagged(release.candidate_tag, **kwargs)\n+ if testing:\n+ koji.listTagged(release.testing_tag, **kwargs)\n+ koji.listTagged(release.pending_testing_tag, **kwargs)\n \n builds = koji.multiCall() or [] # Protect against None\n \n@@ -145,12 +150,13 @@\n \n \n pkg = request.params.get('package')\n- log.debug('latest_candidate(%r)' % pkg)\n+ testing = asbool(request.params.get('testing'))\n+ log.debug('latest_candidate(%r, %r)' % (pkg, testing))\n \n if not pkg:\n return []\n \n- result = work(pkg)\n+ result = work(pkg, testing)\n \n log.debug(result)\n return result\n", "issue": "add build completion to buildroot override add\nFrom: https://fedorahosted.org/bodhi/ticket/788\n\n\"Small regression in Bodhi 2.0: builds completion fails to work in the override page.\n\nI tried to submit an override for qpid-cpp-0.32-8.fc23 (due to boost rebuild induced ABI break), and it failed in both discovering the name of the source package and the nvr.\"\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nimport datetime\nimport sqlalchemy as sa\n\nfrom pyramid.security import authenticated_userid\nfrom pyramid.view import view_config, notfound_view_config\nfrom pyramid.exceptions import HTTPNotFound, HTTPForbidden\n\nfrom bodhi import log\nimport bodhi.models\nimport bodhi.util\n\n\n@notfound_view_config(append_slash=True)\ndef notfound_view(context, request):\n \"\"\" Automatically redirects to slash-appended routes.\n\n http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/urldispatch.html#redirecting-to-slash-appended-rou\n \"\"\"\n return HTTPNotFound()\n\n\ndef get_top_testers(request):\n db = request.db\n blacklist = request.registry.settings.get('stats_blacklist').split()\n days = int(request.registry.settings.get('top_testers_timeframe', 7))\n start_time = datetime.datetime.utcnow() - datetime.timedelta(days=days)\n\n query = db.query(\n bodhi.models.User,\n sa.func.count(bodhi.models.User.comments).label('count_1')\n ).join(bodhi.models.Comment)\n query = query\\\n .order_by('count_1 desc')\\\n .filter(bodhi.models.Comment.timestamp > start_time)\n\n for user in blacklist:\n query = query.filter(bodhi.models.User.name != user)\n\n return query\\\n .group_by(bodhi.models.User)\\\n .limit(5)\\\n .all()\n\n\ndef get_latest_updates(request, critpath, security):\n db = request.db\n query = db.query(bodhi.models.Update)\n\n if critpath:\n query = query.filter(\n bodhi.models.Update.critpath==True)\n if security:\n query = query.filter(\n bodhi.models.Update.type==bodhi.models.UpdateType.security)\n\n query = query.filter(\n bodhi.models.Update.status==bodhi.models.UpdateStatus.testing)\n\n query = query.order_by(bodhi.models.Update.date_submitted.desc())\n return query.limit(5).all()\n\n\n@view_config(route_name='home', renderer='home.html')\ndef home(request):\n \"\"\" Returns data for the frontpage \"\"\"\n r = request\n\n @request.cache.cache_on_arguments()\n def work():\n top_testers = get_top_testers(request)\n critpath_updates = get_latest_updates(request, True, False)\n security_updates = get_latest_updates(request, False, True)\n\n return {\n \"top_testers\": [(obj.__json__(r), n) for obj, n in top_testers],\n \"critpath_updates\": [obj.__json__(r) for obj in critpath_updates],\n \"security_updates\": [obj.__json__(r) for obj in security_updates],\n }\n\n return work()\n\n\n@view_config(route_name='new_update', renderer='new_update.html')\ndef new_update(request):\n \"\"\" Returns the new update form \"\"\"\n user = authenticated_userid(request)\n if not user:\n raise HTTPForbidden(\"You must be logged in.\")\n return dict(\n update=None,\n types=reversed(bodhi.models.UpdateType.values()),\n severities=reversed(bodhi.models.UpdateSeverity.values()),\n suggestions=reversed(bodhi.models.UpdateSuggestion.values()),\n )\n\n\n@view_config(route_name='latest_candidates', renderer='json')\ndef latest_candidates(request):\n \"\"\"\n For a given `package`, this method returns the most recent builds tagged\n into the Release.candidate_tag for all Releases.\n \"\"\"\n koji = request.koji\n db = request.db\n\n @request.cache.cache_on_arguments()\n def work(pkg):\n result = []\n koji.multicall = True\n\n releases = db.query(bodhi.models.Release) \\\n .filter(\n bodhi.models.Release.state.in_(\n 
(bodhi.models.ReleaseState.pending,\n bodhi.models.ReleaseState.current)))\n\n for release in releases:\n koji.listTagged(release.candidate_tag, package=pkg, latest=True)\n\n builds = koji.multiCall() or [] # Protect against None\n\n for build in builds:\n if isinstance(build, dict):\n continue\n if build and build[0] and build[0][0]:\n result.append({\n 'nvr': build[0][0]['nvr'],\n 'id': build[0][0]['id'],\n })\n return result\n\n\n pkg = request.params.get('package')\n log.debug('latest_candidate(%r)' % pkg)\n\n if not pkg:\n return []\n\n result = work(pkg)\n\n log.debug(result)\n return result\n\n\n@view_config(route_name='latest_builds', renderer='json')\ndef latest_builds(request):\n \"\"\" Get a list of the latest builds for a given package.\n\n Returns a dictionary of the release dist tag to the latest build.\n \"\"\"\n builds = {}\n koji = request.koji\n package = request.params.get('package')\n for tag_type, tags in bodhi.models.Release.get_tags()[0].iteritems():\n for tag in tags:\n try:\n for build in koji.getLatestBuilds(tag, package=package):\n builds[tag] = build['nvr']\n except: # Things like EPEL don't have pending tags\n pass\n return builds\n\n\n@view_config(route_name='new_override', renderer='override.html')\ndef new_override(request):\n \"\"\" Returns the new buildroot override form \"\"\"\n user = authenticated_userid(request)\n if not user:\n raise HTTPForbidden(\"You must be logged in.\")\n return dict()\n\n\n@view_config(route_name='api_version', renderer='json')\ndef api_version(request):\n \"\"\" Returns the Bodhi API version \"\"\"\n return dict(version=bodhi.util.version())\n", "path": "bodhi/views/generic.py"}]}
| 2,493 | 390 |
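Editor's note (not part of the dataset row above): the bodhi fix extends the `latest_candidates` view so that, when the override form asks for `testing=true`, the koji multicall also queries each release's testing and pending-testing tags rather than only the candidate tag. The sketch below is a simplified stand-in for that loop; `koji` and `releases` are assumed inputs rather than a live koji hub connection.

```python
def queue_candidate_queries(koji, releases, pkg, testing):
    """Queue koji listTagged calls, loosely mirroring the patched latest_candidates view."""
    koji.multicall = True
    kwargs = dict(package=pkg, latest=True)
    for release in releases:
        koji.listTagged(release.candidate_tag, **kwargs)
        if testing:
            # Builds that already moved past the candidate tag can still be
            # completed by the buildroot-override form.
            koji.listTagged(release.testing_tag, **kwargs)
            koji.listTagged(release.pending_testing_tag, **kwargs)
    return koji.multiCall() or []  # protect against None on an empty multicall
```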
gh_patches_debug_8807
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-1819
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PCC gives error with AMP
## 🐛 Bug
On turning on Automatic Mixed Precision, there is an error in Pearson Correlation Coefficient computation at this [line](https://github.com/Lightning-AI/torchmetrics/blob/962f82db7b0ab7ca32fe050aba45f1d3f9fe64c9/src/torchmetrics/functional/regression/pearson.py#L95). The trace is as follows,
```
File "/home/nikhil/miniconda3/envs/mrl/lib/python3.8/site-packages/torchmetrics/functional/regression/pearson.py", line 80, in _pearson_corrcoef_compute
corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
RuntimeError: "sqrt_vml_cpu" not implemented for 'Half'
```
Turning AMP off, it fixes it.
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
Steps to reproduce the behavior...
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<details>
<summary>Code sample</summary>
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
</details>
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- TorchMetrics version : `0.11.4` installed via `pip`
- Python & PyTorch Version (e.g., 1.0): `Python 3.8`, `Torch 2.0.1+cu118`
- Any other relevant information such as OS (e.g., Linux): `Linux`
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of src/torchmetrics/functional/regression/pearson.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs
20 from torchmetrics.utilities.checks import _check_same_shape
21
22
23 def _pearson_corrcoef_update(
24 preds: Tensor,
25 target: Tensor,
26 mean_x: Tensor,
27 mean_y: Tensor,
28 var_x: Tensor,
29 var_y: Tensor,
30 corr_xy: Tensor,
31 n_prior: Tensor,
32 num_outputs: int,
33 ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
34 """Update and returns variables required to compute Pearson Correlation Coefficient.
35
36 Check for same shape of input tensors.
37
38 Args:
39 preds: estimated scores
40 target: ground truth scores
41 mean_x: current mean estimate of x tensor
42 mean_y: current mean estimate of y tensor
43 var_x: current variance estimate of x tensor
44 var_y: current variance estimate of y tensor
45 corr_xy: current covariance estimate between x and y tensor
46 n_prior: current number of observed observations
47 num_outputs: Number of outputs in multioutput setting
48 """
49 # Data checking
50 _check_same_shape(preds, target)
51 _check_data_shape_to_num_outputs(preds, target, num_outputs)
52 cond = n_prior.mean() > 0
53
54 n_obs = preds.shape[0]
55 if cond:
56 mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)
57 my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)
58 else:
59 mx_new = preds.mean(0)
60 my_new = target.mean(0)
61
62 n_prior += n_obs
63
64 if cond:
65 var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)
66 var_y += ((target - my_new) * (target - mean_y)).sum(0)
67
68 else:
69 var_x += preds.var(0) * (n_obs - 1)
70 var_y += target.var(0) * (n_obs - 1)
71 corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)
72 mean_x = mx_new
73 mean_y = my_new
74
75 return mean_x, mean_y, var_x, var_y, corr_xy, n_prior
76
77
78 def _pearson_corrcoef_compute(
79 var_x: Tensor,
80 var_y: Tensor,
81 corr_xy: Tensor,
82 nb: Tensor,
83 ) -> Tensor:
84 """Compute the final pearson correlation based on accumulated statistics.
85
86 Args:
87 var_x: variance estimate of x tensor
88 var_y: variance estimate of y tensor
89 corr_xy: covariance estimate between x and y tensor
90 nb: number of observations
91 """
92 var_x /= nb - 1
93 var_y /= nb - 1
94 corr_xy /= nb - 1
95 corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
96 return torch.clamp(corrcoef, -1.0, 1.0)
97
98
99 def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:
100 """Compute pearson correlation coefficient.
101
102 Args:
103 preds: estimated scores
104 target: ground truth scores
105
106 Example (single output regression):
107 >>> from torchmetrics.functional.regression import pearson_corrcoef
108 >>> target = torch.tensor([3, -0.5, 2, 7])
109 >>> preds = torch.tensor([2.5, 0.0, 2, 8])
110 >>> pearson_corrcoef(preds, target)
111 tensor(0.9849)
112
113 Example (multi output regression):
114 >>> from torchmetrics.functional.regression import pearson_corrcoef
115 >>> target = torch.tensor([[3, -0.5], [2, 7]])
116 >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])
117 >>> pearson_corrcoef(preds, target)
118 tensor([1., 1.])
119 """
120 d = preds.shape[1] if preds.ndim == 2 else 1
121 _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)
122 mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()
123 var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()
124 _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(
125 preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]
126 )
127 return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)
128
[end of src/torchmetrics/functional/regression/pearson.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py
--- a/src/torchmetrics/functional/regression/pearson.py
+++ b/src/torchmetrics/functional/regression/pearson.py
@@ -92,6 +92,12 @@
var_x /= nb - 1
var_y /= nb - 1
corr_xy /= nb - 1
+ # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16
+ # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed
+ if var_x.dtype == torch.float16 and var_x.device == torch.device("cpu"):
+ var_x = var_x.bfloat16()
+ var_y = var_y.bfloat16()
+
corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()
return torch.clamp(corrcoef, -1.0, 1.0)
|
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py\n--- a/src/torchmetrics/functional/regression/pearson.py\n+++ b/src/torchmetrics/functional/regression/pearson.py\n@@ -92,6 +92,12 @@\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n+ # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16\n+ # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed\n+ if var_x.dtype == torch.float16 and var_x.device == torch.device(\"cpu\"):\n+ var_x = var_x.bfloat16()\n+ var_y = var_y.bfloat16()\n+\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n", "issue": "PCC gives error with AMP\n## \ud83d\udc1b Bug\r\n\r\nOn turning on Automatic Mixed Precision, there is an error in Pearson Correlation Coefficient computation at this [line](https://github.com/Lightning-AI/torchmetrics/blob/962f82db7b0ab7ca32fe050aba45f1d3f9fe64c9/src/torchmetrics/functional/regression/pearson.py#L95). The trace is as follows,\r\n```\r\n File \"/home/nikhil/miniconda3/envs/mrl/lib/python3.8/site-packages/torchmetrics/functional/regression/pearson.py\", line 80, in _pearson_corrcoef_compute \r\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze() \r\nRuntimeError: \"sqrt_vml_cpu\" not implemented for 'Half'\r\n```\r\n\r\nTurning AMP off, it fixes it. \r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Code sample</summary>\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- TorchMetrics version : `0.11.4` installed via `pip`\r\n- Python & PyTorch Version (e.g., 1.0): `Python 3.8`, `Torch 2.0.1+cu118`\r\n- Any other relevant information such as OS (e.g., Linux): `Linux`\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n cond = n_prior.mean() > 0\n\n n_obs = preds.shape[0]\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n else:\n mx_new = preds.mean(0)\n my_new = target.mean(0)\n\n n_prior += n_obs\n\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 
7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}]}
| 2,411 | 244 |
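Editor's note (not part of the dataset row above): the torchmetrics fix sidesteps the missing CPU half-precision `sqrt` kernel by upcasting the accumulated variances to bfloat16 before taking the square root. The sketch below reproduces the failure mode and the workaround from the golden diff; whether `sqrt` actually raises on a CPU float16 tensor depends on the PyTorch build (the issue reports it on torch 2.0.1), so the error path is illustrative.

```python
import torch

var = torch.tensor([0.25, 4.0], dtype=torch.float16)  # CPU half tensor, as under AMP

try:
    print(var.sqrt())  # on the builds referenced in the issue this raises
except RuntimeError as err:
    print(err)  # e.g. "sqrt_vml_cpu" not implemented for 'Half'

# Workaround mirrored from the golden diff: upcast CPU float16 to bfloat16
# before taking the square root, then compute the correlation as usual.
if var.dtype == torch.float16 and var.device == torch.device("cpu"):
    var = var.bfloat16()
print(var.sqrt())  # roughly tensor([0.5000, 2.0000], dtype=torch.bfloat16)
```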