| Column | Type | Length / value range |
|---|---|---|
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.71k-18.9k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 465-23.6k |
| num_tokens_prompt | int64 | 556-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
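
The records below can also be inspected programmatically. A minimal sketch follows; the Hub ID (taken from the `source` column) and the `train` split name are assumptions, not stated anywhere on this page.

```python
# Minimal sketch: load the dataset and inspect one record.
# Assumptions: the Hub ID "rasdani/github-patches" (from the `source` column)
# and the split name "train" are guesses, not confirmed by this page.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"], row["in_source_id"])
print(row["prompt"][:300])        # issue statement plus partial code base
print(row["golden_diff"][:300])   # reference patch

# verification_info is a JSON string bundling the golden diff, the issue text
# and the original file contents ("before_files").
info = json.loads(row["verification_info"])
print(sorted(info))
```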

**problem_id:** gh_patches_debug_14660 | **source:** rasdani/github-patches | **task_type:** git_diff | **in_source_id:** lhotse-speech__lhotse-103

**prompt:**

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A few questions...
I came across a few points of confusion while reading the code in order to write an example. It would be helpful if they could be clarified (I may have missed something).
https://github.com/lhotse-speech/lhotse/blob/master/lhotse/kaldi.py#L68
why `duration - start` rather than just `duration`?
https://github.com/lhotse-speech/lhotse/blob/master/lhotse/audio.py#L178
why not `[n_sources, n_channels, n_samples]`?
Thanks!
</issue>
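
Editorial note on the first question (not part of the original issue): a Kaldi `segments` line stores an absolute end time as its fourth field rather than a duration, which is why the code subtracts `start`; the accepted fix shown further down simply renames the variable to `end` to make that explicit. A minimal sketch:

```python
# Sketch only: a Kaldi `segments` line has the form
#   <utterance-id> <recording-id> <segment-begin> <segment-end>
# so the segment duration is end - start, not the fourth field itself.
line = "utt1 reco1 1.25 3.75"
segment_id, recording_id, start, end = line.split()
duration = float(end) - float(start)
print(segment_id, recording_id, duration)  # utt1 reco1 2.5
```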
<code>
[start of lhotse/kaldi.py]
1 from collections import defaultdict
2 from pathlib import Path
3 from typing import Dict, Optional, Tuple
4
5 from lhotse.audio import AudioSource, Recording, RecordingSet
6 from lhotse.supervision import SupervisionSegment, SupervisionSet
7 from lhotse.utils import Pathlike
8
9
10 def load_kaldi_data_dir(path: Pathlike, sampling_rate: int) -> Tuple[RecordingSet, Optional[SupervisionSet]]:
11 """
12 Load a Kaldi data directory and convert it to a Lhotse RecordingSet and SupervisionSet manifests.
13 For this to work, at least the wav.scp file must exist.
14 SupervisionSet is created only when a segments file exists.
15 All the other files (text, utt2spk, etc.) are optional, and some of them might not be handled yet.
16 In particular, feats.scp files are ignored.
17 """
18 path = Path(path)
19 assert path.is_dir()
20
21 # must exist for RecordingSet
22 recordings = load_kaldi_text_mapping(path / 'wav.scp', must_exist=True)
23
24 durations = defaultdict(float)
25 reco2dur = path / 'reco2dur'
26 if not reco2dur.is_file():
27 raise ValueError(f"No such file: '{reco2dur}' -- fix it by running: utils/data/get_reco2dur.sh <data-dir>")
28 with reco2dur.open() as f:
29 for line in f:
30 recording_id, dur = line.strip().split()
31 durations[recording_id] = float(dur)
32
33 audio_set = RecordingSet.from_recordings(
34 Recording(
35 id=recording_id,
36 sources=[
37 AudioSource(
38 type='command' if path_or_cmd.endswith('|') else 'file',
39 channels=[0],
40 source=path_or_cmd[:-1] if path_or_cmd.endswith('|') else path_or_cmd
41 )
42 ],
43 sampling_rate=sampling_rate,
44 num_samples=int(durations[recording_id] * sampling_rate),
45 duration=durations[recording_id]
46 )
47 for recording_id, path_or_cmd in recordings.items()
48 )
49
50 # must exist for SupervisionSet
51 segments = path / 'segments'
52 if not segments.is_file():
53 return audio_set, None
54
55 with segments.open() as f:
56 supervision_segments = [l.strip().split() for l in f]
57
58 texts = load_kaldi_text_mapping(path / 'text')
59 speakers = load_kaldi_text_mapping(path / 'utt2spk')
60 genders = load_kaldi_text_mapping(path / 'spk2gender')
61 languages = load_kaldi_text_mapping(path / 'utt2lang')
62
63 supervision_set = SupervisionSet.from_segments(
64 SupervisionSegment(
65 id=segment_id,
66 recording_id=recording_id,
67 start=float(start),
68 duration=float(duration) - float(start),
69 channel=0,
70 text=texts[segment_id],
71 language=languages[segment_id],
72 speaker=speakers[segment_id],
73 gender=genders[speakers[segment_id]]
74 )
75 for segment_id, recording_id, start, duration in supervision_segments
76 )
77
78 return audio_set, supervision_set
79
80
81 def load_kaldi_text_mapping(path: Path, must_exist: bool = False) -> Dict[str, Optional[str]]:
82 """Load Kaldi files such as utt2spk, spk2gender, text, etc. as a dict."""
83 mapping = defaultdict(lambda: None)
84 if path.is_file():
85 with path.open() as f:
86 mapping = dict(line.strip().split(' ', maxsplit=1) for line in f)
87 elif must_exist:
88 raise ValueError(f"No such file: {path}")
89 return mapping
90
[end of lhotse/kaldi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

**golden_diff:**

diff --git a/lhotse/kaldi.py b/lhotse/kaldi.py
--- a/lhotse/kaldi.py
+++ b/lhotse/kaldi.py
@@ -65,14 +65,14 @@
id=segment_id,
recording_id=recording_id,
start=float(start),
- duration=float(duration) - float(start),
+ duration=float(end) - float(start),
channel=0,
text=texts[segment_id],
language=languages[segment_id],
speaker=speakers[segment_id],
gender=genders[speakers[segment_id]]
)
- for segment_id, recording_id, start, duration in supervision_segments
+ for segment_id, recording_id, start, end in supervision_segments
)
return audio_set, supervision_set
| {"golden_diff": "diff --git a/lhotse/kaldi.py b/lhotse/kaldi.py\n--- a/lhotse/kaldi.py\n+++ b/lhotse/kaldi.py\n@@ -65,14 +65,14 @@\n id=segment_id,\n recording_id=recording_id,\n start=float(start),\n- duration=float(duration) - float(start),\n+ duration=float(end) - float(start),\n channel=0,\n text=texts[segment_id],\n language=languages[segment_id],\n speaker=speakers[segment_id],\n gender=genders[speakers[segment_id]]\n )\n- for segment_id, recording_id, start, duration in supervision_segments\n+ for segment_id, recording_id, start, end in supervision_segments\n )\n \n return audio_set, supervision_set\n", "issue": "A few questions...\nI came across a few confusions while I was reading the code in order to write an example. It would be helpful for me if they are clarified ( I may have missed something).\r\n\r\nhttps://github.com/lhotse-speech/lhotse/blob/master/lhotse/kaldi.py#L68\r\nwhy `duration - start` rather than just `duration`?\r\n\r\nhttps://github.com/lhotse-speech/lhotse/blob/master/lhotse/audio.py#L178\r\nwhy not `[n_sources, n_channels, n_samples]`?\r\n\r\nThanks!\n", "before_files": [{"content": "from collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Optional, Tuple\n\nfrom lhotse.audio import AudioSource, Recording, RecordingSet\nfrom lhotse.supervision import SupervisionSegment, SupervisionSet\nfrom lhotse.utils import Pathlike\n\n\ndef load_kaldi_data_dir(path: Pathlike, sampling_rate: int) -> Tuple[RecordingSet, Optional[SupervisionSet]]:\n \"\"\"\n Load a Kaldi data directory and convert it to a Lhotse RecordingSet and SupervisionSet manifests.\n For this to work, at least the wav.scp file must exist.\n SupervisionSet is created only when a segments file exists.\n All the other files (text, utt2spk, etc.) 
are optional, and some of them might not be handled yet.\n In particular, feats.scp files are ignored.\n \"\"\"\n path = Path(path)\n assert path.is_dir()\n\n # must exist for RecordingSet\n recordings = load_kaldi_text_mapping(path / 'wav.scp', must_exist=True)\n\n durations = defaultdict(float)\n reco2dur = path / 'reco2dur'\n if not reco2dur.is_file():\n raise ValueError(f\"No such file: '{reco2dur}' -- fix it by running: utils/data/get_reco2dur.sh <data-dir>\")\n with reco2dur.open() as f:\n for line in f:\n recording_id, dur = line.strip().split()\n durations[recording_id] = float(dur)\n\n audio_set = RecordingSet.from_recordings(\n Recording(\n id=recording_id,\n sources=[\n AudioSource(\n type='command' if path_or_cmd.endswith('|') else 'file',\n channels=[0],\n source=path_or_cmd[:-1] if path_or_cmd.endswith('|') else path_or_cmd\n )\n ],\n sampling_rate=sampling_rate,\n num_samples=int(durations[recording_id] * sampling_rate),\n duration=durations[recording_id]\n )\n for recording_id, path_or_cmd in recordings.items()\n )\n\n # must exist for SupervisionSet\n segments = path / 'segments'\n if not segments.is_file():\n return audio_set, None\n\n with segments.open() as f:\n supervision_segments = [l.strip().split() for l in f]\n\n texts = load_kaldi_text_mapping(path / 'text')\n speakers = load_kaldi_text_mapping(path / 'utt2spk')\n genders = load_kaldi_text_mapping(path / 'spk2gender')\n languages = load_kaldi_text_mapping(path / 'utt2lang')\n\n supervision_set = SupervisionSet.from_segments(\n SupervisionSegment(\n id=segment_id,\n recording_id=recording_id,\n start=float(start),\n duration=float(duration) - float(start),\n channel=0,\n text=texts[segment_id],\n language=languages[segment_id],\n speaker=speakers[segment_id],\n gender=genders[speakers[segment_id]]\n )\n for segment_id, recording_id, start, duration in supervision_segments\n )\n\n return audio_set, supervision_set\n\n\ndef load_kaldi_text_mapping(path: Path, must_exist: bool = False) -> Dict[str, Optional[str]]:\n \"\"\"Load Kaldi files such as utt2spk, spk2gender, text, etc. as a dict.\"\"\"\n mapping = defaultdict(lambda: None)\n if path.is_file():\n with path.open() as f:\n mapping = dict(line.strip().split(' ', maxsplit=1) for line in f)\n elif must_exist:\n raise ValueError(f\"No such file: {path}\")\n return mapping\n", "path": "lhotse/kaldi.py"}]} | 1,620 | 175 |

**problem_id:** gh_patches_debug_3926 | **source:** rasdani/github-patches | **task_type:** git_diff | **in_source_id:** scoutapp__scout_apm_python-41

**prompt:**

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problem with flask_jwt
I'm trying to use ScoutApp in a Flask application that uses the flask_jwt module, but I receive the error message below when my login route is called:
```
[2018-05-30 13:06:20,984] ERROR in __init__: Traceback (most recent call last):
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py", line 1639, in full_dispatch_request
rv = self.dispatch_request()
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 69, in dispatch_request
result = view_func(**req.view_args)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py", line 213, in __call__
return self._eop_wrapper_(self.__subject__, *args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 119, in tracing_function
raise e
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 116, in tracing_function
return original(*args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py", line 115, in _default_auth_request_handler
username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)
AttributeError: 'NoneType' object has no attribute 'get'
No user info
Request: OPTIONS /api/auth
QueryString: b''
IP: 127.0.0.1
Agent: linux | chrome 66.0.3359.181
Raw Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36
Payload: b''
ERROR:condominio:Traceback (most recent call last):
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py", line 1639, in full_dispatch_request
rv = self.dispatch_request()
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 69, in dispatch_request
result = view_func(**req.view_args)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py", line 213, in __call__
return self._eop_wrapper_(self.__subject__, *args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 119, in tracing_function
raise e
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py", line 116, in tracing_function
return original(*args, **kwargs)
File "/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py", line 115, in _default_auth_request_handler
username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)
AttributeError: 'NoneType' object has no attribute 'get'
No user info
Request: OPTIONS /api/auth
QueryString: b''
IP: 127.0.0.1
Agent: linux | chrome 66.0.3359.181
Raw Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36
Payload: b''
INFO:werkzeug:127.0.0.1 - - [30/May/2018 13:06:23] "OPTIONS /api/auth HTTP/1.1" 500
```
Debugging the problem, it appears that flask_jwt can't get the request data to perform the authentication. For some reason the request loses its data when ScoutApp is enabled.
The JWT code that breaks:
```python
def _default_auth_request_handler():
data = request.get_json()
username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)
password = data.get(current_app.config.get('JWT_AUTH_PASSWORD_KEY'), None)
criterion = [username, password, len(data) == 2]
if not all(criterion):
raise JWTError('Bad Request', 'Invalid credentials')
identity = _jwt.authentication_callback(username, password)
if identity:
access_token = _jwt.jwt_encode_callback(identity)
return _jwt.auth_response_callback(access_token, identity)
else:
raise JWTError('Bad Request', 'Invalid credentials')
```
</issue>
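
Editorial note (not part of the original issue): the failing requests in the log are CORS `OPTIONS` preflights, which carry no JSON body, so `request.get_json()` yields `None` inside flask_jwt's handler. Flask normally answers preflights itself before any view runs, and the merged patch shown further down restores that behaviour inside the monkey-patched `dispatch_request`. A condensed sketch of that idea:

```python
# Condensed sketch of the merged fix (see the golden diff further down): answer
# OPTIONS preflights the way Flask itself would, before the wrapped view runs.
from flask import current_app
from flask.globals import _request_ctx_stack


def dispatch_request(self):  # method on the ScoutApm wrapper shown below
    req = _request_ctx_stack.top.request
    app = current_app

    if req.method == 'OPTIONS':
        return app.make_default_options_response()

    # ... fall through to the existing view wrapping and dispatch ...
```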
<code>
[start of src/scout_apm/flask/__init__.py]
1 from flask import current_app
2 from flask.globals import _request_ctx_stack
3
4 import scout_apm.core
5 from scout_apm.core.config.config import ScoutConfig
6 from scout_apm.core.tracked_request import TrackedRequest
7 from scout_apm.core.monkey import CallableProxy
8
9
10 class ScoutApm(object):
11 def __init__(self, app=None):
12 self.app = app
13 if app is not None:
14 self.init_app(app)
15
16 def init_app(self, app):
17 app.before_first_request(self.before_first_request)
18 app.before_request(self.process_request)
19 app.after_request(self.process_response)
20
21 # Monkey-patch the Flask.dispatch_request method
22 app.dispatch_request = self.dispatch_request
23
24 #############
25 # Startup #
26 #############
27
28 def before_first_request(self):
29 self.extract_flask_settings()
30 scout_apm.core.install()
31
32 def extract_flask_settings(self):
33 """
34 Copies SCOUT_* settings in the app into Scout's config lookup
35 """
36 configs = {}
37 for name in filter(lambda x: x.startswith('SCOUT_'), current_app.config):
38 value = current_app.config[name]
39 clean_name = name.replace('SCOUT_', '').lower()
40 configs[clean_name] = value
41 ScoutConfig.set(**configs)
42
43 #############################
44 # Request Lifecycle hooks #
45 #############################
46
47 def dispatch_request(self):
48 """Modified version of Flask.dispatch_request to call process_view."""
49
50 req = _request_ctx_stack.top.request
51 app = current_app
52
53 if req.routing_exception is not None:
54 app.raise_routing_exception(req)
55
56 # The routing rule has some handy attributes to extract how Flask found
57 # this endpoint
58 rule = req.url_rule
59
60 # Wrap the real view_func
61 view_func = self.wrap_view_func(
62 app,
63 rule,
64 req,
65 app.view_functions[rule.endpoint],
66 req.view_args)
67
68 return view_func(**req.view_args)
69
70 def process_request(self):
71 TrackedRequest.instance().start_span(operation='Flask')
72
73 def wrap_view_func(self, app, rule, req, view_func, view_kwargs):
74 """ This method is called just before the flask view is called.
75 This is done by the dispatch_request method.
76 """
77 operation = view_func.__module__ + '.' + view_func.__name__
78 return self.trace_view_function(
79 view_func,
80 ('Controller', {'path': req.path, 'name': operation}))
81
82 def trace_view_function(self, func, info):
83 try:
84 def tracing_function(original, *args, **kwargs):
85 entry_type, detail = info
86
87 operation = entry_type
88 if detail['name'] is not None:
89 operation = operation + '/' + detail['name']
90
91 tr = TrackedRequest.instance()
92 tr.mark_real_request()
93 span = tr.start_span(operation=operation)
94
95 for key in detail:
96 span.tag(key, detail[key])
97
98 # And the custom View stuff
99 # request = args[0]
100
101 # Extract headers
102 # regex = re.compile('^HTTP_')
103 # headers = dict((regex.sub('', header), value) for (header, value)
104 # in request.META.items() if header.startswith('HTTP_'))
105
106 # span.tag('remote_addr', request.META['REMOTE_ADDR'])
107
108 try:
109 return original(*args, **kwargs)
110 except Exception as e:
111 TrackedRequest.instance().tag('error', 'true')
112 raise e
113 finally:
114 TrackedRequest.instance().stop_span()
115
116 return CallableProxy(func, tracing_function)
117 except Exception as err:
118 # If we can't wrap for any reason, just return the original
119 return func
120
121 def process_response(self, response):
122 TrackedRequest.instance().stop_span()
123 return response
124
125
[end of src/scout_apm/flask/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

**golden_diff:**

diff --git a/src/scout_apm/flask/__init__.py b/src/scout_apm/flask/__init__.py
--- a/src/scout_apm/flask/__init__.py
+++ b/src/scout_apm/flask/__init__.py
@@ -50,6 +50,10 @@
req = _request_ctx_stack.top.request
app = current_app
+ # Return flask's default options response. See issue #40
+ if req.method == 'OPTIONS':
+ return app.make_default_options_response()
+
if req.routing_exception is not None:
app.raise_routing_exception(req)
| {"golden_diff": "diff --git a/src/scout_apm/flask/__init__.py b/src/scout_apm/flask/__init__.py\n--- a/src/scout_apm/flask/__init__.py\n+++ b/src/scout_apm/flask/__init__.py\n@@ -50,6 +50,10 @@\n req = _request_ctx_stack.top.request\n app = current_app\n \n+ # Return flask's default options response. See issue #40\n+ if req.method == 'OPTIONS':\n+ return app.make_default_options_response()\n+\n if req.routing_exception is not None:\n app.raise_routing_exception(req)\n", "issue": "Problem with flask_jwt\nI'm trying to use the ScoutApp in an flask application using the flask_jwt module, but i recive the below error message when my loggin route are called:\r\n\r\n```jsx\r\n[2018-05-30 13:06:20,984] ERROR in __init__: Traceback (most recent call last):\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py\", line 1639, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 69, in dispatch_request\r\n result = view_func(**req.view_args)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py\", line 213, in __call__\r\n return self._eop_wrapper_(self.__subject__, *args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 119, in tracing_function\r\n raise e\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 116, in tracing_function\r\n return original(*args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py\", line 115, in _default_auth_request_handler\r\n username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)\r\n\r\nAttributeError: 'NoneType' object has no attribute 'get'\r\n No user info\r\n Request: OPTIONS /api/auth\r\n QueryString: b''\r\n IP: 127.0.0.1\r\n Agent: linux | chrome 66.0.3359.181\r\n Raw Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\r\n Payload: b''\r\n \r\nERROR:condominio:Traceback (most recent call last):\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask/app.py\", line 1639, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 69, in dispatch_request\r\n result = view_func(**req.view_args)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/core/monkey.py\", line 213, in __call__\r\n return self._eop_wrapper_(self.__subject__, *args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 119, in tracing_function\r\n raise e\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/scout_apm/flask/__init__.py\", line 116, in tracing_function\r\n return original(*args, **kwargs)\r\n\r\n File \"/home/andreramos/.virtualenvs/back/lib/python3.6/site-packages/flask_jwt/__init__.py\", line 115, in _default_auth_request_handler\r\n username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)\r\n\r\nAttributeError: 'NoneType' object has no attribute 'get'\r\n No user info\r\n Request: OPTIONS /api/auth\r\n QueryString: b''\r\n IP: 127.0.0.1\r\n Agent: linux | chrome 66.0.3359.181\r\n Raw Agent: Mozilla/5.0 (X11; Linux 
x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\r\n Payload: b''\r\n \r\nINFO:werkzeug:127.0.0.1 - - [30/May/2018 13:06:23] \"OPTIONS /api/auth HTTP/1.1\" 500 \r\n```\r\n\r\nDebugging the problem, apparently the JWT can't get the request data to do the authentication. For some reason the request lose it the data when ScoutApp are enabled.\r\n\r\nThe JWT code that breaks:\r\n```python\r\ndef _default_auth_request_handler():\r\n data = request.get_json()\r\n username = data.get(current_app.config.get('JWT_AUTH_USERNAME_KEY'), None)\r\n password = data.get(current_app.config.get('JWT_AUTH_PASSWORD_KEY'), None)\r\n criterion = [username, password, len(data) == 2]\r\n\r\n if not all(criterion):\r\n raise JWTError('Bad Request', 'Invalid credentials')\r\n\r\n identity = _jwt.authentication_callback(username, password)\r\n\r\n if identity:\r\n access_token = _jwt.jwt_encode_callback(identity)\r\n return _jwt.auth_response_callback(access_token, identity)\r\n else:\r\n raise JWTError('Bad Request', 'Invalid credentials')\r\n```\r\n\n", "before_files": [{"content": "from flask import current_app\nfrom flask.globals import _request_ctx_stack\n\nimport scout_apm.core\nfrom scout_apm.core.config.config import ScoutConfig\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.monkey import CallableProxy\n\n\nclass ScoutApm(object):\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n app.before_first_request(self.before_first_request)\n app.before_request(self.process_request)\n app.after_request(self.process_response)\n\n # Monkey-patch the Flask.dispatch_request method\n app.dispatch_request = self.dispatch_request\n\n #############\n # Startup #\n #############\n\n def before_first_request(self):\n self.extract_flask_settings()\n scout_apm.core.install()\n\n def extract_flask_settings(self):\n \"\"\"\n Copies SCOUT_* settings in the app into Scout's config lookup\n \"\"\"\n configs = {}\n for name in filter(lambda x: x.startswith('SCOUT_'), current_app.config):\n value = current_app.config[name]\n clean_name = name.replace('SCOUT_', '').lower()\n configs[clean_name] = value\n ScoutConfig.set(**configs)\n\n #############################\n # Request Lifecycle hooks #\n #############################\n\n def dispatch_request(self):\n \"\"\"Modified version of Flask.dispatch_request to call process_view.\"\"\"\n\n req = _request_ctx_stack.top.request\n app = current_app\n\n if req.routing_exception is not None:\n app.raise_routing_exception(req)\n\n # The routing rule has some handy attributes to extract how Flask found\n # this endpoint\n rule = req.url_rule\n\n # Wrap the real view_func\n view_func = self.wrap_view_func(\n app,\n rule,\n req,\n app.view_functions[rule.endpoint],\n req.view_args)\n\n return view_func(**req.view_args)\n\n def process_request(self):\n TrackedRequest.instance().start_span(operation='Flask')\n\n def wrap_view_func(self, app, rule, req, view_func, view_kwargs):\n \"\"\" This method is called just before the flask view is called.\n This is done by the dispatch_request method.\n \"\"\"\n operation = view_func.__module__ + '.' 
+ view_func.__name__\n return self.trace_view_function(\n view_func,\n ('Controller', {'path': req.path, 'name': operation}))\n\n def trace_view_function(self, func, info):\n try:\n def tracing_function(original, *args, **kwargs):\n entry_type, detail = info\n\n operation = entry_type\n if detail['name'] is not None:\n operation = operation + '/' + detail['name']\n\n tr = TrackedRequest.instance()\n tr.mark_real_request()\n span = tr.start_span(operation=operation)\n\n for key in detail:\n span.tag(key, detail[key])\n\n # And the custom View stuff\n # request = args[0]\n\n # Extract headers\n # regex = re.compile('^HTTP_')\n # headers = dict((regex.sub('', header), value) for (header, value)\n # in request.META.items() if header.startswith('HTTP_'))\n\n # span.tag('remote_addr', request.META['REMOTE_ADDR'])\n\n try:\n return original(*args, **kwargs)\n except Exception as e:\n TrackedRequest.instance().tag('error', 'true')\n raise e\n finally:\n TrackedRequest.instance().stop_span()\n\n return CallableProxy(func, tracing_function)\n except Exception as err:\n # If we can't wrap for any reason, just return the original\n return func\n\n def process_response(self, response):\n TrackedRequest.instance().stop_span()\n return response\n\n", "path": "src/scout_apm/flask/__init__.py"}]} | 2,856 | 136 |

**problem_id:** gh_patches_debug_8194 | **source:** rasdani/github-patches | **task_type:** git_diff | **in_source_id:** readthedocs__readthedocs.org-6402

**prompt:**

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve regex for semver in automation rules
We have a very basic pattern, but the semver FAQ suggests a better one:
https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
This needs to be changed, together with a comment linking back to where the pattern came from.
https://github.com/readthedocs/readthedocs.org/blob/ff0ad67991e599f79e9d956a3733785ccb78c505/readthedocs/builds/constants.py#L113-L113
</issue>
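
Editorial note (not part of the original issue): the pattern the merged patch adopts is the FAQ's suggested regex with an optional leading `v`. A small sanity check, with illustrative version strings only:

```python
# Sketch only: the semver.org suggested pattern, plus the optional leading "v"
# used by the merged patch further down.
import re

SEMVER_VERSIONS_REGEX = (
    r'^v?(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)'
    r'(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)'
    r'(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?'
    r'(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'
)
pattern = re.compile(SEMVER_VERSIONS_REGEX)

for tag in ('1.2.3', 'v1.2.3-rc.1+build.5', '1.2', '01.2.3'):
    print(tag, bool(pattern.match(tag)))  # True, True, False, False
```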
<code>
[start of readthedocs/builds/constants.py]
1 """Constants for the builds app."""
2
3 from django.conf import settings
4 from django.utils.translation import ugettext_lazy as _
5
6
7 BUILD_STATE_TRIGGERED = 'triggered'
8 BUILD_STATE_CLONING = 'cloning'
9 BUILD_STATE_INSTALLING = 'installing'
10 BUILD_STATE_BUILDING = 'building'
11 BUILD_STATE_FINISHED = 'finished'
12
13 BUILD_STATE = (
14 (BUILD_STATE_TRIGGERED, _('Triggered')),
15 (BUILD_STATE_CLONING, _('Cloning')),
16 (BUILD_STATE_INSTALLING, _('Installing')),
17 (BUILD_STATE_BUILDING, _('Building')),
18 (BUILD_STATE_FINISHED, _('Finished')),
19 )
20
21 BUILD_TYPES = (
22 ('html', _('HTML')),
23 ('pdf', _('PDF')),
24 ('epub', _('Epub')),
25 # There is currently no support for building man/dash formats, but we keep
26 # it there since the DB might still contain those values for legacy
27 # projects.
28 ('man', _('Manpage')),
29 ('dash', _('Dash')),
30 )
31
32 # Manager name for Internal Versions or Builds.
33 # ie: Versions and Builds Excluding pull request/merge request Versions and Builds.
34 INTERNAL = 'internal'
35 # Manager name for External Versions or Builds.
36 # ie: Only pull request/merge request Versions and Builds.
37 EXTERNAL = 'external'
38 EXTERNAL_TEXT = _('External')
39
40 BRANCH = 'branch'
41 BRANCH_TEXT = _('Branch')
42 TAG = 'tag'
43 TAG_TEXT = _('Tag')
44 UNKNOWN = 'unknown'
45 UNKNOWN_TEXT = _('Unknown')
46
47 VERSION_TYPES = (
48 (BRANCH, BRANCH_TEXT),
49 (TAG, TAG_TEXT),
50 (EXTERNAL, EXTERNAL_TEXT),
51 (UNKNOWN, UNKNOWN_TEXT),
52 )
53
54 LATEST = settings.RTD_LATEST
55 LATEST_VERBOSE_NAME = settings.RTD_LATEST_VERBOSE_NAME
56
57 STABLE = settings.RTD_STABLE
58 STABLE_VERBOSE_NAME = settings.RTD_STABLE_VERBOSE_NAME
59
60 # Those names are specialcased version names. They do not correspond to
61 # branches/tags in a project's repository.
62 NON_REPOSITORY_VERSIONS = (
63 LATEST,
64 STABLE,
65 )
66
67 # General Build Statuses
68 BUILD_STATUS_FAILURE = 'failed'
69 BUILD_STATUS_PENDING = 'pending'
70 BUILD_STATUS_SUCCESS = 'success'
71
72 # GitHub Build Statuses
73 GITHUB_BUILD_STATUS_FAILURE = 'failure'
74 GITHUB_BUILD_STATUS_PENDING = 'pending'
75 GITHUB_BUILD_STATUS_SUCCESS = 'success'
76
77 # GitLab Build Statuses
78 GITLAB_BUILD_STATUS_FAILURE = 'failed'
79 GITLAB_BUILD_STATUS_PENDING = 'pending'
80 GITLAB_BUILD_STATUS_SUCCESS = 'success'
81
82 # Used to select correct Build status and description to be sent to each service API
83 SELECT_BUILD_STATUS = {
84 BUILD_STATUS_FAILURE: {
85 'github': GITHUB_BUILD_STATUS_FAILURE,
86 'gitlab': GITLAB_BUILD_STATUS_FAILURE,
87 'description': 'Read the Docs build failed!',
88 },
89 BUILD_STATUS_PENDING: {
90 'github': GITHUB_BUILD_STATUS_PENDING,
91 'gitlab': GITLAB_BUILD_STATUS_PENDING,
92 'description': 'Read the Docs build is in progress!',
93 },
94 BUILD_STATUS_SUCCESS: {
95 'github': GITHUB_BUILD_STATUS_SUCCESS,
96 'gitlab': GITLAB_BUILD_STATUS_SUCCESS,
97 'description': 'Read the Docs build succeeded!',
98 },
99 }
100
101 RTD_BUILD_STATUS_API_NAME = 'continuous-documentation/read-the-docs'
102
103 GITHUB_EXTERNAL_VERSION_NAME = 'Pull Request'
104 GITLAB_EXTERNAL_VERSION_NAME = 'Merge Request'
105 GENERIC_EXTERNAL_VERSION_NAME = 'External Version'
106
107
108 # Automation rules
109
110 ALL_VERSIONS = 'all-versions'
111 ALL_VERSIONS_REGEX = r'.*'
112 SEMVER_VERSIONS = 'semver-versions'
113 SEMVER_VERSIONS_REGEX = r'^v?(\d+\.)(\d+\.)(\d+)(-.+)?$'
114
115
116 PREDEFINED_MATCH_ARGS = (
117 (ALL_VERSIONS, _('Any version')),
118 (SEMVER_VERSIONS, _('SemVer versions')),
119 (None, _('Custom match')),
120 )
121
122 PREDEFINED_MATCH_ARGS_VALUES = {
123 ALL_VERSIONS: ALL_VERSIONS_REGEX,
124 SEMVER_VERSIONS: SEMVER_VERSIONS_REGEX,
125 }
126
[end of readthedocs/builds/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

**golden_diff:**

diff --git a/readthedocs/builds/constants.py b/readthedocs/builds/constants.py
--- a/readthedocs/builds/constants.py
+++ b/readthedocs/builds/constants.py
@@ -110,7 +110,10 @@
ALL_VERSIONS = 'all-versions'
ALL_VERSIONS_REGEX = r'.*'
SEMVER_VERSIONS = 'semver-versions'
-SEMVER_VERSIONS_REGEX = r'^v?(\d+\.)(\d+\.)(\d+)(-.+)?$'
+
+# Pattern referred from
+# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
+SEMVER_VERSIONS_REGEX = r'^v?(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$' # noqa
PREDEFINED_MATCH_ARGS = (
| {"golden_diff": "diff --git a/readthedocs/builds/constants.py b/readthedocs/builds/constants.py\n--- a/readthedocs/builds/constants.py\n+++ b/readthedocs/builds/constants.py\n@@ -110,7 +110,10 @@\n ALL_VERSIONS = 'all-versions'\n ALL_VERSIONS_REGEX = r'.*'\n SEMVER_VERSIONS = 'semver-versions'\n-SEMVER_VERSIONS_REGEX = r'^v?(\\d+\\.)(\\d+\\.)(\\d+)(-.+)?$'\n+\n+# Pattern referred from\n+# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string\n+SEMVER_VERSIONS_REGEX = r'^v?(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$' # noqa\n \n \n PREDEFINED_MATCH_ARGS = (\n", "issue": "Improve regex for semver in automation rules\nWe have a very basic pattern, but on the semver faq they have a better one\r\n\r\nhttps://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string\r\n\r\nThis needs to be changed and with a comment to the link from where it came from.\r\n\r\nhttps://github.com/readthedocs/readthedocs.org/blob/ff0ad67991e599f79e9d956a3733785ccb78c505/readthedocs/builds/constants.py#L113-L113\n", "before_files": [{"content": "\"\"\"Constants for the builds app.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nBUILD_STATE_TRIGGERED = 'triggered'\nBUILD_STATE_CLONING = 'cloning'\nBUILD_STATE_INSTALLING = 'installing'\nBUILD_STATE_BUILDING = 'building'\nBUILD_STATE_FINISHED = 'finished'\n\nBUILD_STATE = (\n (BUILD_STATE_TRIGGERED, _('Triggered')),\n (BUILD_STATE_CLONING, _('Cloning')),\n (BUILD_STATE_INSTALLING, _('Installing')),\n (BUILD_STATE_BUILDING, _('Building')),\n (BUILD_STATE_FINISHED, _('Finished')),\n)\n\nBUILD_TYPES = (\n ('html', _('HTML')),\n ('pdf', _('PDF')),\n ('epub', _('Epub')),\n # There is currently no support for building man/dash formats, but we keep\n # it there since the DB might still contain those values for legacy\n # projects.\n ('man', _('Manpage')),\n ('dash', _('Dash')),\n)\n\n# Manager name for Internal Versions or Builds.\n# ie: Versions and Builds Excluding pull request/merge request Versions and Builds.\nINTERNAL = 'internal'\n# Manager name for External Versions or Builds.\n# ie: Only pull request/merge request Versions and Builds.\nEXTERNAL = 'external'\nEXTERNAL_TEXT = _('External')\n\nBRANCH = 'branch'\nBRANCH_TEXT = _('Branch')\nTAG = 'tag'\nTAG_TEXT = _('Tag')\nUNKNOWN = 'unknown'\nUNKNOWN_TEXT = _('Unknown')\n\nVERSION_TYPES = (\n (BRANCH, BRANCH_TEXT),\n (TAG, TAG_TEXT),\n (EXTERNAL, EXTERNAL_TEXT),\n (UNKNOWN, UNKNOWN_TEXT),\n)\n\nLATEST = settings.RTD_LATEST\nLATEST_VERBOSE_NAME = settings.RTD_LATEST_VERBOSE_NAME\n\nSTABLE = settings.RTD_STABLE\nSTABLE_VERBOSE_NAME = settings.RTD_STABLE_VERBOSE_NAME\n\n# Those names are specialcased version names. 
They do not correspond to\n# branches/tags in a project's repository.\nNON_REPOSITORY_VERSIONS = (\n LATEST,\n STABLE,\n)\n\n# General Build Statuses\nBUILD_STATUS_FAILURE = 'failed'\nBUILD_STATUS_PENDING = 'pending'\nBUILD_STATUS_SUCCESS = 'success'\n\n# GitHub Build Statuses\nGITHUB_BUILD_STATUS_FAILURE = 'failure'\nGITHUB_BUILD_STATUS_PENDING = 'pending'\nGITHUB_BUILD_STATUS_SUCCESS = 'success'\n\n# GitLab Build Statuses\nGITLAB_BUILD_STATUS_FAILURE = 'failed'\nGITLAB_BUILD_STATUS_PENDING = 'pending'\nGITLAB_BUILD_STATUS_SUCCESS = 'success'\n\n# Used to select correct Build status and description to be sent to each service API\nSELECT_BUILD_STATUS = {\n BUILD_STATUS_FAILURE: {\n 'github': GITHUB_BUILD_STATUS_FAILURE,\n 'gitlab': GITLAB_BUILD_STATUS_FAILURE,\n 'description': 'Read the Docs build failed!',\n },\n BUILD_STATUS_PENDING: {\n 'github': GITHUB_BUILD_STATUS_PENDING,\n 'gitlab': GITLAB_BUILD_STATUS_PENDING,\n 'description': 'Read the Docs build is in progress!',\n },\n BUILD_STATUS_SUCCESS: {\n 'github': GITHUB_BUILD_STATUS_SUCCESS,\n 'gitlab': GITLAB_BUILD_STATUS_SUCCESS,\n 'description': 'Read the Docs build succeeded!',\n },\n}\n\nRTD_BUILD_STATUS_API_NAME = 'continuous-documentation/read-the-docs'\n\nGITHUB_EXTERNAL_VERSION_NAME = 'Pull Request'\nGITLAB_EXTERNAL_VERSION_NAME = 'Merge Request'\nGENERIC_EXTERNAL_VERSION_NAME = 'External Version'\n\n\n# Automation rules\n\nALL_VERSIONS = 'all-versions'\nALL_VERSIONS_REGEX = r'.*'\nSEMVER_VERSIONS = 'semver-versions'\nSEMVER_VERSIONS_REGEX = r'^v?(\\d+\\.)(\\d+\\.)(\\d+)(-.+)?$'\n\n\nPREDEFINED_MATCH_ARGS = (\n (ALL_VERSIONS, _('Any version')),\n (SEMVER_VERSIONS, _('SemVer versions')),\n (None, _('Custom match')),\n)\n\nPREDEFINED_MATCH_ARGS_VALUES = {\n ALL_VERSIONS: ALL_VERSIONS_REGEX,\n SEMVER_VERSIONS: SEMVER_VERSIONS_REGEX,\n}\n", "path": "readthedocs/builds/constants.py"}]} | 1,808 | 281 |

**problem_id:** gh_patches_debug_56402 | **source:** rasdani/github-patches | **task_type:** git_diff | **in_source_id:** encode__httpx-361

**prompt:**

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing py.typed declaration?
`mypy` is complaining about not being able to find type annotations for `httpx`:
`error: Cannot find module named 'httpx'`
I'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?
</issue>
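
Editorial note (not part of the original issue): PEP 561 needs three things from a setuptools project — a `py.typed` marker inside the package, that marker shipped via `package_data`, and `zip_safe=False` so the package is never installed as a zipped archive. The `setup.py` below already has the first two; the merged patch adds the third. A condensed sketch (metadata trimmed, `packages` simplified):

```python
# Condensed sketch of the PEP 561 pieces; the real setup.py below carries the
# full metadata and computes `packages` dynamically.
from setuptools import setup

setup(
    name="httpx",
    packages=["httpx"],
    package_data={"httpx": ["py.typed"]},  # ship the (empty) marker file
    include_package_data=True,
    zip_safe=False,  # PEP 561 packages must not be installed zipped
)
```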
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5 from pathlib import Path
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 version = Path(package, "__version__.py").read_text()
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 long_description = ""
23 with open("README.md", encoding="utf8") as f:
24 long_description += f.read()
25 long_description += "\n\n"
26 with open("CHANGELOG.md", encoding="utf8") as f:
27 long_description += f.read()
28 return long_description
29
30
31 def get_packages(package):
32 """
33 Return root package and all sub-packages.
34 """
35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")]
36
37
38 setup(
39 name="httpx",
40 python_requires=">=3.6",
41 version=get_version("httpx"),
42 url="https://github.com/encode/httpx",
43 license="BSD",
44 description="The next generation HTTP client.",
45 long_description=get_long_description(),
46 long_description_content_type="text/markdown",
47 author="Tom Christie",
48 author_email="[email protected]",
49 package_data={"httpx": ["py.typed"]},
50 packages=get_packages("httpx"),
51 include_package_data=True,
52 install_requires=[
53 "certifi",
54 "chardet==3.*",
55 "h11==0.8.*",
56 "h2==3.*",
57 "hstspreload>=2019.8.27",
58 "idna==2.*",
59 "rfc3986==1.*",
60 ],
61 classifiers=[
62 "Development Status :: 3 - Alpha",
63 "Environment :: Web Environment",
64 "Intended Audience :: Developers",
65 "License :: OSI Approved :: BSD License",
66 "Operating System :: OS Independent",
67 "Topic :: Internet :: WWW/HTTP",
68 "Programming Language :: Python :: 3",
69 "Programming Language :: Python :: 3.6",
70 "Programming Language :: Python :: 3.7",
71 "Programming Language :: Python :: 3.8",
72 ],
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

**golden_diff:**

diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -49,6 +49,7 @@
package_data={"httpx": ["py.typed"]},
packages=get_packages("httpx"),
include_package_data=True,
+ zip_safe=False,
install_requires=[
"certifi",
"chardet==3.*",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -49,6 +49,7 @@\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n+ zip_safe=False,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n", "issue": "Missing py.typed declaration?\n`mypy` is complaining about not being able to find type annotations for `httpx`: \r\n\r\n`error: Cannot find module named 'httpx'`\r\n\r\nI'm somewhat new to using type annotations/static type checking in Python, but from the mypy documentation [here](https://mypy.readthedocs.io/en/latest/installed_packages.html#making-pep-561-compatible-packages) it looks like there may be a missing declaration in `setup.py`?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n install_requires=[\n \"certifi\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"hstspreload>=2019.8.27\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]} | 1,278 | 83 |
gh_patches_debug_2399 | rasdani/github-patches | git_diff | WeblateOrg__weblate-1655 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
File download is outdated
### Steps to reproduce
1. Edit string.
2. Donwload original translation file (without conversion).
### Actual behaviour
The file does not have recent changes.
### Expected behaviour
All changes should be reflected.
### Server configuration
Current master
</issue>
<code>
[start of weblate/trans/views/helper.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © 2012 - 2017 Michal Čihař <[email protected]>
4 #
5 # This file is part of Weblate <https://weblate.org/>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <https://www.gnu.org/licenses/>.
19 #
20 """Helper methods for views."""
21
22 from django.http import HttpResponse, Http404
23 from django.shortcuts import get_object_or_404
24 import django.utils.translation
25 from django.utils.translation import trans_real, ugettext as _
26
27 from weblate.utils import messages
28 from weblate.permissions.helpers import check_access
29 from weblate.trans.exporters import get_exporter
30 from weblate.trans.models import Project, SubProject, Translation
31
32
33 def get_translation(request, project, subproject, lang, skip_acl=False):
34 """Return translation matching parameters."""
35 translation = get_object_or_404(
36 Translation.objects.prefetch(),
37 language__code=lang,
38 subproject__slug=subproject,
39 subproject__project__slug=project,
40 enabled=True
41 )
42 if not skip_acl:
43 check_access(request, translation.subproject.project)
44 return translation
45
46
47 def get_subproject(request, project, subproject, skip_acl=False):
48 """Return subproject matching parameters."""
49 subproject = get_object_or_404(
50 SubProject.objects.prefetch(),
51 project__slug=project,
52 slug=subproject
53 )
54 if not skip_acl:
55 check_access(request, subproject.project)
56 return subproject
57
58
59 def get_project(request, project, skip_acl=False):
60 """Return project matching parameters."""
61 project = get_object_or_404(
62 Project,
63 slug=project,
64 )
65 if not skip_acl:
66 check_access(request, project)
67 return project
68
69
70 def get_project_translation(request, project=None, subproject=None, lang=None):
71 """Return project, subproject, translation tuple for given parameters."""
72
73 if lang is not None and subproject is not None:
74 # Language defined? We can get all
75 translation = get_translation(request, project, subproject, lang)
76 subproject = translation.subproject
77 project = subproject.project
78 else:
79 translation = None
80 if subproject is not None:
81 # Component defined?
82 subproject = get_subproject(request, project, subproject)
83 project = subproject.project
84 elif project is not None:
85 # Only project defined?
86 project = get_project(request, project)
87
88 # Return tuple
89 return project, subproject, translation
90
91
92 def try_set_language(lang):
93 """Try to activate language"""
94
95 try:
96 django.utils.translation.activate(lang)
97 # workaround for https://code.djangoproject.com/ticket/26050
98 # pylint: disable=W0212
99 if trans_real.catalog()._catalog is None:
100 raise Exception('Invalid language!')
101 except Exception:
102 # Ignore failure on activating language
103 django.utils.translation.activate('en')
104
105
106 def import_message(request, count, message_none, message_ok):
107 if count == 0:
108 messages.warning(request, message_none)
109 else:
110 messages.success(request, message_ok % count)
111
112
113 def download_translation_file(translation, fmt=None):
114 if fmt is not None:
115 try:
116 exporter = get_exporter(fmt)(translation=translation)
117 except KeyError:
118 raise Http404('File format not supported')
119 exporter.add_units(translation)
120 return exporter.get_response(
121 '{{project}}-{0}-{{language}}.{{extension}}'.format(
122 translation.subproject.slug
123 )
124 )
125
126 srcfilename = translation.get_filename()
127
128 # Construct file name (do not use real filename as it is usually not
129 # that useful)
130 filename = '{0}-{1}-{2}.{3}'.format(
131 translation.subproject.project.slug,
132 translation.subproject.slug,
133 translation.language.code,
134 translation.store.extension
135 )
136
137 # Create response
138 with open(srcfilename) as handle:
139 response = HttpResponse(
140 handle.read(),
141 content_type=translation.store.mimetype
142 )
143
144 # Fill in response headers
145 response['Content-Disposition'] = 'attachment; filename={0}'.format(
146 filename
147 )
148
149 return response
150
151
152 def show_form_errors(request, form):
153 """Show all form errors as a message."""
154 for error in form.non_field_errors():
155 messages.error(request, error)
156 for field in form:
157 for error in field.errors:
158 messages.error(
159 request,
160 _('Error in parameter %(field)s: %(error)s') % {
161 'field': field.name,
162 'error': error
163 }
164 )
165
[end of weblate/trans/views/helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

**golden_diff:**

diff --git a/weblate/trans/views/helper.py b/weblate/trans/views/helper.py
--- a/weblate/trans/views/helper.py
+++ b/weblate/trans/views/helper.py
@@ -123,6 +123,10 @@
)
)
+ # Force flushing pending units
+ author = translation.get_last_author(True)
+ translation.update_units(author)
+
srcfilename = translation.get_filename()
# Construct file name (do not use real filename as it is usually not
| {"golden_diff": "diff --git a/weblate/trans/views/helper.py b/weblate/trans/views/helper.py\n--- a/weblate/trans/views/helper.py\n+++ b/weblate/trans/views/helper.py\n@@ -123,6 +123,10 @@\n )\n )\n \n+ # Force flushing pending units\n+ author = translation.get_last_author(True)\n+ translation.update_units(author)\n+\n srcfilename = translation.get_filename()\n \n # Construct file name (do not use real filename as it is usually not\n", "issue": "File download is outdated\n### Steps to reproduce\r\n1. Edit string.\r\n2. Donwload original translation file (without conversion).\r\n\r\n### Actual behaviour\r\nThe file does not have recent changes.\r\n\r\n### Expected behaviour\r\nAll changes should be reflected.\r\n\r\n### Server configuration\r\nCurrent master\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2017 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Helper methods for views.\"\"\"\n\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import get_object_or_404\nimport django.utils.translation\nfrom django.utils.translation import trans_real, ugettext as _\n\nfrom weblate.utils import messages\nfrom weblate.permissions.helpers import check_access\nfrom weblate.trans.exporters import get_exporter\nfrom weblate.trans.models import Project, SubProject, Translation\n\n\ndef get_translation(request, project, subproject, lang, skip_acl=False):\n \"\"\"Return translation matching parameters.\"\"\"\n translation = get_object_or_404(\n Translation.objects.prefetch(),\n language__code=lang,\n subproject__slug=subproject,\n subproject__project__slug=project,\n enabled=True\n )\n if not skip_acl:\n check_access(request, translation.subproject.project)\n return translation\n\n\ndef get_subproject(request, project, subproject, skip_acl=False):\n \"\"\"Return subproject matching parameters.\"\"\"\n subproject = get_object_or_404(\n SubProject.objects.prefetch(),\n project__slug=project,\n slug=subproject\n )\n if not skip_acl:\n check_access(request, subproject.project)\n return subproject\n\n\ndef get_project(request, project, skip_acl=False):\n \"\"\"Return project matching parameters.\"\"\"\n project = get_object_or_404(\n Project,\n slug=project,\n )\n if not skip_acl:\n check_access(request, project)\n return project\n\n\ndef get_project_translation(request, project=None, subproject=None, lang=None):\n \"\"\"Return project, subproject, translation tuple for given parameters.\"\"\"\n\n if lang is not None and subproject is not None:\n # Language defined? 
We can get all\n translation = get_translation(request, project, subproject, lang)\n subproject = translation.subproject\n project = subproject.project\n else:\n translation = None\n if subproject is not None:\n # Component defined?\n subproject = get_subproject(request, project, subproject)\n project = subproject.project\n elif project is not None:\n # Only project defined?\n project = get_project(request, project)\n\n # Return tuple\n return project, subproject, translation\n\n\ndef try_set_language(lang):\n \"\"\"Try to activate language\"\"\"\n\n try:\n django.utils.translation.activate(lang)\n # workaround for https://code.djangoproject.com/ticket/26050\n # pylint: disable=W0212\n if trans_real.catalog()._catalog is None:\n raise Exception('Invalid language!')\n except Exception:\n # Ignore failure on activating language\n django.utils.translation.activate('en')\n\n\ndef import_message(request, count, message_none, message_ok):\n if count == 0:\n messages.warning(request, message_none)\n else:\n messages.success(request, message_ok % count)\n\n\ndef download_translation_file(translation, fmt=None):\n if fmt is not None:\n try:\n exporter = get_exporter(fmt)(translation=translation)\n except KeyError:\n raise Http404('File format not supported')\n exporter.add_units(translation)\n return exporter.get_response(\n '{{project}}-{0}-{{language}}.{{extension}}'.format(\n translation.subproject.slug\n )\n )\n\n srcfilename = translation.get_filename()\n\n # Construct file name (do not use real filename as it is usually not\n # that useful)\n filename = '{0}-{1}-{2}.{3}'.format(\n translation.subproject.project.slug,\n translation.subproject.slug,\n translation.language.code,\n translation.store.extension\n )\n\n # Create response\n with open(srcfilename) as handle:\n response = HttpResponse(\n handle.read(),\n content_type=translation.store.mimetype\n )\n\n # Fill in response headers\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\n filename\n )\n\n return response\n\n\ndef show_form_errors(request, form):\n \"\"\"Show all form errors as a message.\"\"\"\n for error in form.non_field_errors():\n messages.error(request, error)\n for field in form:\n for error in field.errors:\n messages.error(\n request,\n _('Error in parameter %(field)s: %(error)s') % {\n 'field': field.name,\n 'error': error\n }\n )\n", "path": "weblate/trans/views/helper.py"}]} | 2,121 | 112 |
gh_patches_debug_3989 | rasdani/github-patches | git_diff | matrix-org__synapse-8583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mypy==0.782 will cause mypy-zope dependency regression in future pip releases
## Description
Pip is planning to use a [new resolver by default from `20.3`](https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020), which is more strict in it's dependency handling.
Currently, `tox`'s `mypy` test environment defines this:
```
deps =
{[base]deps}
mypy==0.782
mypy-zope
```
Given that input, `pip>=20.3` will likely either:
1. Choose an older `mypy-zope` package that works with `mypy==0.782`, causing a regression.
2. ~~Reject installing outright, as this stricter dependency resolver will refuse to install incompatible packages.~~
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Copyright 2014-2017 OpenMarket Ltd
4 # Copyright 2017 Vector Creations Ltd
5 # Copyright 2017-2018 New Vector Ltd
6 #
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 import glob
19 import os
20
21 from setuptools import Command, find_packages, setup
22
23 here = os.path.abspath(os.path.dirname(__file__))
24
25
26 # Some notes on `setup.py test`:
27 #
28 # Once upon a time we used to try to make `setup.py test` run `tox` to run the
29 # tests. That's a bad idea for three reasons:
30 #
31 # 1: `setup.py test` is supposed to find out whether the tests work in the
32 # *current* environmentt, not whatever tox sets up.
33 # 2: Empirically, trying to install tox during the test run wasn't working ("No
34 # module named virtualenv").
35 # 3: The tox documentation advises against it[1].
36 #
37 # Even further back in time, we used to use setuptools_trial [2]. That has its
38 # own set of issues: for instance, it requires installation of Twisted to build
39 # an sdist (because the recommended mode of usage is to add it to
40 # `setup_requires`). That in turn means that in order to successfully run tox
41 # you have to have the python header files installed for whichever version of
42 # python tox uses (which is python3 on recent ubuntus, for example).
43 #
44 # So, for now at least, we stick with what appears to be the convention among
45 # Twisted projects, and don't attempt to do anything when someone runs
46 # `setup.py test`; instead we direct people to run `trial` directly if they
47 # care.
48 #
49 # [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command
50 # [2]: https://pypi.python.org/pypi/setuptools_trial
51 class TestCommand(Command):
52 user_options = []
53
54 def initialize_options(self):
55 pass
56
57 def finalize_options(self):
58 pass
59
60 def run(self):
61 print(
62 """Synapse's tests cannot be run via setup.py. To run them, try:
63 PYTHONPATH="." trial tests
64 """
65 )
66
67
68 def read_file(path_segments):
69 """Read a file from the package. Takes a list of strings to join to
70 make the path"""
71 file_path = os.path.join(here, *path_segments)
72 with open(file_path) as f:
73 return f.read()
74
75
76 def exec_file(path_segments):
77 """Execute a single python file to get the variables defined in it"""
78 result = {}
79 code = read_file(path_segments)
80 exec(code, result)
81 return result
82
83
84 version = exec_file(("synapse", "__init__.py"))["__version__"]
85 dependencies = exec_file(("synapse", "python_dependencies.py"))
86 long_description = read_file(("README.rst",))
87
88 REQUIREMENTS = dependencies["REQUIREMENTS"]
89 CONDITIONAL_REQUIREMENTS = dependencies["CONDITIONAL_REQUIREMENTS"]
90 ALL_OPTIONAL_REQUIREMENTS = dependencies["ALL_OPTIONAL_REQUIREMENTS"]
91
92 # Make `pip install matrix-synapse[all]` install all the optional dependencies.
93 CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
94
95 # Developer dependencies should not get included in "all".
96 #
97 # We pin black so that our tests don't start failing on new releases.
98 CONDITIONAL_REQUIREMENTS["lint"] = [
99 "isort==5.0.3",
100 "black==19.10b0",
101 "flake8-comprehensions",
102 "flake8",
103 ]
104
105 # Dependencies which are exclusively required by unit test code. This is
106 # NOT a list of all modules that are necessary to run the unit tests.
107 # Tests assume that all optional dependencies are installed.
108 #
109 # parameterized_class decorator was introduced in parameterized 0.7.0
110 CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"]
111
112 setup(
113 name="matrix-synapse",
114 version=version,
115 packages=find_packages(exclude=["tests", "tests.*"]),
116 description="Reference homeserver for the Matrix decentralised comms protocol",
117 install_requires=REQUIREMENTS,
118 extras_require=CONDITIONAL_REQUIREMENTS,
119 include_package_data=True,
120 zip_safe=False,
121 long_description=long_description,
122 python_requires="~=3.5",
123 classifiers=[
124 "Development Status :: 5 - Production/Stable",
125 "Topic :: Communications :: Chat",
126 "License :: OSI Approved :: Apache Software License",
127 "Programming Language :: Python :: 3 :: Only",
128 "Programming Language :: Python :: 3.5",
129 "Programming Language :: Python :: 3.6",
130 "Programming Language :: Python :: 3.7",
131 "Programming Language :: Python :: 3.8",
132 ],
133 scripts=["synctl"] + glob.glob("scripts/*"),
134 cmdclass={"test": TestCommand},
135 )
136
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -102,6 +102,8 @@
"flake8",
]
+CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.790", "mypy-zope"]
+
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -102,6 +102,8 @@\n \"flake8\",\n ]\n \n+CONDITIONAL_REQUIREMENTS[\"mypy\"] = [\"mypy==0.790\", \"mypy-zope\"]\n+\n # Dependencies which are exclusively required by unit test code. This is\n # NOT a list of all modules that are necessary to run the unit tests.\n # Tests assume that all optional dependencies are installed.\n", "issue": "mypy==0.782 will cause mypy-zope dependency regression in future pip releases\n## Description\r\n\r\nPip is planning to use a [new resolver by default from `20.3`](https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020), which is more strict in it's dependency handling.\r\n\r\nCurrently, `tox`'s `mypy` test environment defines this:\r\n\r\n```\r\ndeps =\r\n {[base]deps}\r\n mypy==0.782\r\n mypy-zope\r\n```\r\n\r\nGiven that input, `pip>=20.3` will likely either:\r\n1. Choose an older `mypy-zope` package that works with `mypy==0.782`, causing a regression.\r\n2. ~~Reject installing outright, as this stricter dependency resolver will refuse to install incompatible packages.~~\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright 2014-2017 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2017-2018 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\n\nfrom setuptools import Command, find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n# Some notes on `setup.py test`:\n#\n# Once upon a time we used to try to make `setup.py test` run `tox` to run the\n# tests. That's a bad idea for three reasons:\n#\n# 1: `setup.py test` is supposed to find out whether the tests work in the\n# *current* environmentt, not whatever tox sets up.\n# 2: Empirically, trying to install tox during the test run wasn't working (\"No\n# module named virtualenv\").\n# 3: The tox documentation advises against it[1].\n#\n# Even further back in time, we used to use setuptools_trial [2]. That has its\n# own set of issues: for instance, it requires installation of Twisted to build\n# an sdist (because the recommended mode of usage is to add it to\n# `setup_requires`). That in turn means that in order to successfully run tox\n# you have to have the python header files installed for whichever version of\n# python tox uses (which is python3 on recent ubuntus, for example).\n#\n# So, for now at least, we stick with what appears to be the convention among\n# Twisted projects, and don't attempt to do anything when someone runs\n# `setup.py test`; instead we direct people to run `trial` directly if they\n# care.\n#\n# [1]: http://tox.readthedocs.io/en/2.5.0/example/basic.html#integration-with-setup-py-test-command\n# [2]: https://pypi.python.org/pypi/setuptools_trial\nclass TestCommand(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print(\n \"\"\"Synapse's tests cannot be run via setup.py. 
To run them, try:\n PYTHONPATH=\".\" trial tests\n\"\"\"\n )\n\n\ndef read_file(path_segments):\n \"\"\"Read a file from the package. Takes a list of strings to join to\n make the path\"\"\"\n file_path = os.path.join(here, *path_segments)\n with open(file_path) as f:\n return f.read()\n\n\ndef exec_file(path_segments):\n \"\"\"Execute a single python file to get the variables defined in it\"\"\"\n result = {}\n code = read_file(path_segments)\n exec(code, result)\n return result\n\n\nversion = exec_file((\"synapse\", \"__init__.py\"))[\"__version__\"]\ndependencies = exec_file((\"synapse\", \"python_dependencies.py\"))\nlong_description = read_file((\"README.rst\",))\n\nREQUIREMENTS = dependencies[\"REQUIREMENTS\"]\nCONDITIONAL_REQUIREMENTS = dependencies[\"CONDITIONAL_REQUIREMENTS\"]\nALL_OPTIONAL_REQUIREMENTS = dependencies[\"ALL_OPTIONAL_REQUIREMENTS\"]\n\n# Make `pip install matrix-synapse[all]` install all the optional dependencies.\nCONDITIONAL_REQUIREMENTS[\"all\"] = list(ALL_OPTIONAL_REQUIREMENTS)\n\n# Developer dependencies should not get included in \"all\".\n#\n# We pin black so that our tests don't start failing on new releases.\nCONDITIONAL_REQUIREMENTS[\"lint\"] = [\n \"isort==5.0.3\",\n \"black==19.10b0\",\n \"flake8-comprehensions\",\n \"flake8\",\n]\n\n# Dependencies which are exclusively required by unit test code. This is\n# NOT a list of all modules that are necessary to run the unit tests.\n# Tests assume that all optional dependencies are installed.\n#\n# parameterized_class decorator was introduced in parameterized 0.7.0\nCONDITIONAL_REQUIREMENTS[\"test\"] = [\"mock>=2.0\", \"parameterized>=0.7.0\"]\n\nsetup(\n name=\"matrix-synapse\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n description=\"Reference homeserver for the Matrix decentralised comms protocol\",\n install_requires=REQUIREMENTS,\n extras_require=CONDITIONAL_REQUIREMENTS,\n include_package_data=True,\n zip_safe=False,\n long_description=long_description,\n python_requires=\"~=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Communications :: Chat\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n scripts=[\"synctl\"] + glob.glob(\"scripts/*\"),\n cmdclass={\"test\": TestCommand},\n)\n", "path": "setup.py"}]} | 2,223 | 108 |
gh_patches_debug_23 | rasdani/github-patches | git_diff | horovod__horovod-3745 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No module named 'packaging' when install horovod
It seems that the horovod v0.26.0 has some dependency problems.
How long does it take for a new patch version to be released or should I pin the horovod version? ^_^

</issue>
<code>
[start of horovod/__init__.py]
1 from horovod.runner import run
2
3 __version__ = '0.26.0'
4
[end of horovod/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/horovod/__init__.py b/horovod/__init__.py
--- a/horovod/__init__.py
+++ b/horovod/__init__.py
@@ -1,3 +1,3 @@
from horovod.runner import run
-__version__ = '0.26.0'
+__version__ = '0.26.1'
| {"golden_diff": "diff --git a/horovod/__init__.py b/horovod/__init__.py\n--- a/horovod/__init__.py\n+++ b/horovod/__init__.py\n@@ -1,3 +1,3 @@\n from horovod.runner import run\n \n-__version__ = '0.26.0'\n+__version__ = '0.26.1'\n", "issue": "No module named 'packaging' when install horovod\nIt seems that the horovod v0.26.0 has some dependency problems.\r\n\r\nHow long does it take for a new patch version to be released or should I pin the horovod version? ^_^\r\n\r\n\r\n\n", "before_files": [{"content": "from horovod.runner import run\n\n__version__ = '0.26.0'\n", "path": "horovod/__init__.py"}]} | 688 | 88 |
gh_patches_debug_29710 | rasdani/github-patches | git_diff | keras-team__autokeras-166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Model evaluation on test set yields different results on the same model
<!---
**If you are reporting a bug:**
* Verify that your issue is not being currently addressed by other issues or pull requests.
* Please note that Auto-Keras is only compatible with **Python 3.6**.
* Tag the issue with the `bug report` tag.
-->
### Bug Description
Test set evaluation yields different results if re-run.
### Reproducing Steps
Steps to reproduce the behavior:
re-run multiple times:
```python
y_pred = clf.evaluate(X_test, y_test)
print(y_pred)
```
will yield different results:
```
0.0992
0.1032
0.101
0.0989
```
Further, using manual evaluation:
```python
y_prediction = clf.predict(x_test=X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_pred=y_prediction, y_true=y_test)
```
leads to different results as well. It looks like the model either uses some random function (AFAIK all: ```random_states=42``` in the source code) or there is some major error in the pipeline that causes different predictions of the test set all the time.
### Expected Behavior
Final evaluation on a test set should not lead to different results using the same model on the same data.
### Setup Details
Include the details about the versions of:
- OS type and version: Linux
- Python: 3.6.5
- autokeras: 0.2.11
- scikit-learn:0.19.1
- numpy:1.14.5
- keras: 2.2.2
- scipy:1.1.0
- tensorflow: 1.10.0
- pytorch:0.4.1
### Additional context
This error is verified on MNIST and Fashion-MNIST.
</issue>
<code>
[start of autokeras/preprocessor.py]
1 import torch
2
3 import numpy as np
4 from torch.utils.data import Dataset, DataLoader
5 from torchvision.transforms import ToPILImage, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose
6
7 from autokeras.constant import Constant
8
9
10 class OneHotEncoder:
11 """A class that can format data.
12
13 This class provides ways to transform data's classification label into vector.
14
15 Attributes:
16 data: The input data
17 n_classes: The number of classes in the classification problem.
18 labels: The number of labels.
19 label_to_vec: Mapping from label to vector.
20 int_to_label: Mapping from int to label.
21 """
22
23 def __init__(self):
24 """Initialize a OneHotEncoder"""
25 self.data = None
26 self.n_classes = 0
27 self.labels = None
28 self.label_to_vec = {}
29 self.int_to_label = {}
30
31 def fit(self, data):
32 """Create mapping from label to vector, and vector to label."""
33 data = np.array(data).flatten()
34 self.labels = set(data)
35 self.n_classes = len(self.labels)
36 for index, label in enumerate(self.labels):
37 vec = np.array([0] * self.n_classes)
38 vec[index] = 1
39 self.label_to_vec[label] = vec
40 self.int_to_label[index] = label
41
42 def transform(self, data):
43 """Get vector for every element in the data array."""
44 data = np.array(data)
45 if len(data.shape) > 1:
46 data = data.flatten()
47 return np.array(list(map(lambda x: self.label_to_vec[x], data)))
48
49 def inverse_transform(self, data):
50 """Get label for every element in data."""
51 return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1))))
52
53
54 class DataTransformer:
55 def __init__(self, data, augment=Constant.DATA_AUGMENTATION):
56 self.max_val = data.max()
57 data = data / self.max_val
58 self.mean = np.mean(data, axis=(0, 1, 2), keepdims=True).flatten()
59 self.std = np.std(data, axis=(0, 1, 2), keepdims=True).flatten()
60 self.augment = augment
61
62 def transform_train(self, data, targets=None, batch_size=None):
63 if not self.augment:
64 augment_list = []
65 else:
66 augment_list = [ToPILImage(),
67 RandomCrop(data.shape[1:3], padding=4),
68 RandomHorizontalFlip(),
69 ToTensor()
70 ]
71 common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
72 compose_list = augment_list + common_list
73
74 return self._transform(batch_size, compose_list, data, targets)
75
76 def transform_test(self, data, targets=None, batch_size=None):
77 common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
78 compose_list = common_list
79
80 return self._transform(batch_size, compose_list, data, targets)
81
82 def _transform(self, batch_size, compose_list, data, targets):
83 if batch_size is None:
84 batch_size = Constant.MAX_BATCH_SIZE
85 batch_size = min(len(data), batch_size)
86 data = data / self.max_val
87 data = torch.Tensor(data.transpose(0, 3, 1, 2))
88 data_transforms = Compose(compose_list)
89 dataset = MultiTransformDataset(data, targets, data_transforms)
90 return DataLoader(dataset, batch_size=batch_size, shuffle=True)
91
92
93 class MultiTransformDataset(Dataset):
94 def __init__(self, dataset, target, compose):
95 self.dataset = dataset
96 self.target = target
97 self.compose = compose
98
99 def __getitem__(self, index):
100 feature = self.dataset[index]
101 if self.target is None:
102 return self.compose(feature)
103 return self.compose(feature), self.target[index]
104
105 def __len__(self):
106 return len(self.dataset)
107
[end of autokeras/preprocessor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autokeras/preprocessor.py b/autokeras/preprocessor.py
--- a/autokeras/preprocessor.py
+++ b/autokeras/preprocessor.py
@@ -71,23 +71,31 @@
common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
compose_list = augment_list + common_list
- return self._transform(batch_size, compose_list, data, targets)
+ dataset = self._transform(compose_list, data, targets)
+
+ if batch_size is None:
+ batch_size = Constant.MAX_BATCH_SIZE
+ batch_size = min(len(data), batch_size)
+
+ return DataLoader(dataset, batch_size=batch_size, shuffle=True)
def transform_test(self, data, targets=None, batch_size=None):
common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]
compose_list = common_list
- return self._transform(batch_size, compose_list, data, targets)
+ dataset = self._transform(compose_list, data, targets)
- def _transform(self, batch_size, compose_list, data, targets):
if batch_size is None:
batch_size = Constant.MAX_BATCH_SIZE
batch_size = min(len(data), batch_size)
+
+ return DataLoader(dataset, batch_size=batch_size, shuffle=False)
+
+ def _transform(self, compose_list, data, targets):
data = data / self.max_val
data = torch.Tensor(data.transpose(0, 3, 1, 2))
data_transforms = Compose(compose_list)
- dataset = MultiTransformDataset(data, targets, data_transforms)
- return DataLoader(dataset, batch_size=batch_size, shuffle=True)
+ return MultiTransformDataset(data, targets, data_transforms)
class MultiTransformDataset(Dataset):
| {"golden_diff": "diff --git a/autokeras/preprocessor.py b/autokeras/preprocessor.py\n--- a/autokeras/preprocessor.py\n+++ b/autokeras/preprocessor.py\n@@ -71,23 +71,31 @@\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = augment_list + common_list\n \n- return self._transform(batch_size, compose_list, data, targets)\n+ dataset = self._transform(compose_list, data, targets)\n+\n+ if batch_size is None:\n+ batch_size = Constant.MAX_BATCH_SIZE\n+ batch_size = min(len(data), batch_size)\n+\n+ return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n \n def transform_test(self, data, targets=None, batch_size=None):\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = common_list\n \n- return self._transform(batch_size, compose_list, data, targets)\n+ dataset = self._transform(compose_list, data, targets)\n \n- def _transform(self, batch_size, compose_list, data, targets):\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n+\n+ return DataLoader(dataset, batch_size=batch_size, shuffle=False)\n+\n+ def _transform(self, compose_list, data, targets):\n data = data / self.max_val\n data = torch.Tensor(data.transpose(0, 3, 1, 2))\n data_transforms = Compose(compose_list)\n- dataset = MultiTransformDataset(data, targets, data_transforms)\n- return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n+ return MultiTransformDataset(data, targets, data_transforms)\n \n \n class MultiTransformDataset(Dataset):\n", "issue": "Model evaluation on test set yields different results on the same model\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\nTest set evaluation yields different results if re-run.\r\n\r\n### Reproducing Steps\r\nSteps to reproduce the behavior:\r\nre-run multiple times:\r\n```python\r\ny_pred = clf.evaluate(X_test, y_test)\r\nprint(y_pred)\r\n```\r\nwill yield different results:\r\n```\r\n0.0992\r\n0.1032\r\n0.101\r\n0.0989\r\n```\r\n\r\nFurther, using manual evaluation:\r\n```python\r\ny_prediction = clf.predict(x_test=X_test)\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_score(y_pred=y_prediction, y_true=y_test)\r\n```\r\nleads to different results as well. 
It looks like the model either uses some random function (AFAIK all: ```random_states=42``` in the source code) or there is some major error in the pipeline that causes different predictions of the test set all the time.\r\n\r\n### Expected Behavior\r\nFinal evaluation on a test set should not lead to different results using the same model on the same data.\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version: Linux\r\n - Python: 3.6.5\r\n - autokeras: 0.2.11\r\n - scikit-learn:0.19.1\r\n - numpy:1.14.5\r\n - keras: 2.2.2\r\n - scipy:1.1.0\r\n - tensorflow: 1.10.0\r\n - pytorch:0.4.1\r\n\r\n### Additional context\r\nThis error is verified on MNIST and Fashion-MNIST.\n", "before_files": [{"content": "import torch\n\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import ToPILImage, RandomCrop, RandomHorizontalFlip, ToTensor, Normalize, Compose\n\nfrom autokeras.constant import Constant\n\n\nclass OneHotEncoder:\n \"\"\"A class that can format data.\n\n This class provides ways to transform data's classification label into vector.\n\n Attributes:\n data: The input data\n n_classes: The number of classes in the classification problem.\n labels: The number of labels.\n label_to_vec: Mapping from label to vector.\n int_to_label: Mapping from int to label.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize a OneHotEncoder\"\"\"\n self.data = None\n self.n_classes = 0\n self.labels = None\n self.label_to_vec = {}\n self.int_to_label = {}\n\n def fit(self, data):\n \"\"\"Create mapping from label to vector, and vector to label.\"\"\"\n data = np.array(data).flatten()\n self.labels = set(data)\n self.n_classes = len(self.labels)\n for index, label in enumerate(self.labels):\n vec = np.array([0] * self.n_classes)\n vec[index] = 1\n self.label_to_vec[label] = vec\n self.int_to_label[index] = label\n\n def transform(self, data):\n \"\"\"Get vector for every element in the data array.\"\"\"\n data = np.array(data)\n if len(data.shape) > 1:\n data = data.flatten()\n return np.array(list(map(lambda x: self.label_to_vec[x], data)))\n\n def inverse_transform(self, data):\n \"\"\"Get label for every element in data.\"\"\"\n return np.array(list(map(lambda x: self.int_to_label[x], np.argmax(np.array(data), axis=1))))\n\n\nclass DataTransformer:\n def __init__(self, data, augment=Constant.DATA_AUGMENTATION):\n self.max_val = data.max()\n data = data / self.max_val\n self.mean = np.mean(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.std = np.std(data, axis=(0, 1, 2), keepdims=True).flatten()\n self.augment = augment\n\n def transform_train(self, data, targets=None, batch_size=None):\n if not self.augment:\n augment_list = []\n else:\n augment_list = [ToPILImage(),\n RandomCrop(data.shape[1:3], padding=4),\n RandomHorizontalFlip(),\n ToTensor()\n ]\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = augment_list + common_list\n\n return self._transform(batch_size, compose_list, data, targets)\n\n def transform_test(self, data, targets=None, batch_size=None):\n common_list = [Normalize(torch.Tensor(self.mean), torch.Tensor(self.std))]\n compose_list = common_list\n\n return self._transform(batch_size, compose_list, data, targets)\n\n def _transform(self, batch_size, compose_list, data, targets):\n if batch_size is None:\n batch_size = Constant.MAX_BATCH_SIZE\n batch_size = min(len(data), batch_size)\n data = data / self.max_val\n data = torch.Tensor(data.transpose(0, 3, 
1, 2))\n data_transforms = Compose(compose_list)\n dataset = MultiTransformDataset(data, targets, data_transforms)\n return DataLoader(dataset, batch_size=batch_size, shuffle=True)\n\n\nclass MultiTransformDataset(Dataset):\n def __init__(self, dataset, target, compose):\n self.dataset = dataset\n self.target = target\n self.compose = compose\n\n def __getitem__(self, index):\n feature = self.dataset[index]\n if self.target is None:\n return self.compose(feature)\n return self.compose(feature), self.target[index]\n\n def __len__(self):\n return len(self.dataset)\n", "path": "autokeras/preprocessor.py"}]} | 2,013 | 392 |
gh_patches_debug_9194 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show results framework for private projects
## Test plan
GIVEN the 'My results' tab in MyRSR
WHEN searching for a private project
THEN the project should appear in the list
WHEN the private project is opened in 'My results'
THEN the user should have full access to the results framework, just like a regular public project
## Issue description
Currently, we have disabled the results framework in MyRSR for private projects. However, it should be possible to enter results data for private projects as well.
</issue>
<code>
[start of akvo/rest/views/typeahead.py]
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4 See more details in the license.txt file located at the root folder of the
5 Akvo RSR module. For additional details on the GNU license please
6 see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from akvo.rest.serializers import (TypeaheadCountrySerializer,
10 TypeaheadOrganisationSerializer,
11 TypeaheadProjectSerializer,
12 TypeaheadProjectUpdateSerializer)
13 from akvo.rsr.models import Country, Organisation, Project, ProjectUpdate
14
15 from rest_framework.decorators import api_view
16 from rest_framework.response import Response
17
18
19 def rejig(queryset, serializer):
20 """Rearrange & add queryset count to the response data."""
21 return {
22 'count': queryset.count(),
23 'results': serializer.data
24 }
25
26
27 @api_view(['GET'])
28 def typeahead_country(request):
29 countries = Country.objects.all()
30 return Response(
31 rejig(countries, TypeaheadCountrySerializer(countries, many=True))
32 )
33
34
35 @api_view(['GET'])
36 def typeahead_organisation(request):
37 organisations = Organisation.objects.all()
38 return Response(
39 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
40 many=True))
41 )
42
43
44 @api_view(['GET'])
45 def typeahead_user_organisations(request):
46 user = request.user
47 is_admin = user.is_active and (user.is_superuser or user.is_admin)
48 organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()
49 return Response(
50 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
51 many=True))
52 )
53
54
55 @api_view(['GET'])
56 def typeahead_project(request):
57 projects = Project.objects.all().exclude(title='')
58 return Response(
59 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
60 )
61
62
63 @api_view(['GET'])
64 def typeahead_user_projects(request):
65 user = request.user
66 is_admin = user.is_active and (user.is_superuser or user.is_admin)
67 if is_admin:
68 projects = Project.objects.all()
69 else:
70 projects = user.approved_organisations().all_projects()
71 projects = projects.exclude(title='')
72 return Response(
73 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
74 )
75
76
77 @api_view(['GET'])
78 def typeahead_impact_projects(request):
79 user = request.user
80 projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()
81 projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title')
82
83 return Response(
84 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
85 )
86
87
88 @api_view(['GET'])
89 def typeahead_projectupdate(request):
90 updates = ProjectUpdate.objects.all()
91 return Response(
92 rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))
93 )
94
[end of akvo/rest/views/typeahead.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py
--- a/akvo/rest/views/typeahead.py
+++ b/akvo/rest/views/typeahead.py
@@ -78,7 +78,7 @@
def typeahead_impact_projects(request):
user = request.user
projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()
- projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title')
+ projects = projects.published().filter(is_impact_project=True).order_by('title')
return Response(
rejig(projects, TypeaheadProjectSerializer(projects, many=True))
| {"golden_diff": "diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py\n--- a/akvo/rest/views/typeahead.py\n+++ b/akvo/rest/views/typeahead.py\n@@ -78,7 +78,7 @@\n def typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n- projects = projects.published().filter(is_impact_project=True, is_public=True).order_by('title')\n+ projects = projects.published().filter(is_impact_project=True).order_by('title')\n \n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n", "issue": "Show results framework for private projects\n## Test plan\n\nGIVEN the 'My results' tab in MyRSR\nWHEN searching for a private project\nTHEN the project should appear in the list\n\nWHEN the private project is opened in 'My results'\nTHEN the user should have full access to the results framework, just like a regular public project\n## Issue description\n\nCurrently, we have disabled the results framework in MyRSR for private projects. However, it should be possible to enter results data for private projects as well.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\nfrom akvo.rsr.models import Country, Organisation, Project, ProjectUpdate\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n countries = Country.objects.all()\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n organisations = Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n projects = Project.objects.all().exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects 
= projects.published().filter(is_impact_project=True, is_public=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}]} | 1,456 | 153 |
gh_patches_debug_24995 | rasdani/github-patches | git_diff | e-valuation__EvaP-1853 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Investigate Github Action caching
One can cache certain directories in github actions. For Python, caching the entire installation is used (including all the `site-packages`), so that wheels don't need to be rebuild. Additionally, the download could be faster. We should investigate how much there is to gain.
</issue>
<code>
[start of evap/evaluation/management/commands/scss.py]
1 import os
2 import subprocess # nosec
3
4 from django.conf import settings
5 from django.core.management.base import BaseCommand
6
7
8 class Command(BaseCommand):
9 def add_arguments(self, parser):
10 parser.add_argument(
11 "--watch",
12 action="store_true",
13 help="Watch stylesheets and recompile when they change.",
14 )
15 parser.add_argument(
16 "--production",
17 action="store_true",
18 help="Compress output stylesheet and do not generate source maps."
19 " Intended to use in production deployment.",
20 )
21
22 def handle(self, *args, **options):
23 static_directory = settings.STATICFILES_DIRS[0]
24 command = [
25 "npx",
26 "sass",
27 os.path.join(static_directory, "scss", "evap.scss"),
28 os.path.join(static_directory, "css", "evap.css"),
29 ]
30
31 if options["watch"]:
32 command += ["--watch", "--poll"]
33
34 if options["production"]:
35 command += ["--style", "compressed", "--no-source-map"]
36
37 try:
38 subprocess.run(command, check=True) # nosec
39 except FileNotFoundError:
40 print("Could not find sass command", file=self.stderr)
41 except KeyboardInterrupt:
42 pass
43
[end of evap/evaluation/management/commands/scss.py]
[start of evap/evaluation/management/commands/ts.py]
1 import argparse
2 import os
3 import subprocess # nosec
4 import unittest
5
6 from django.conf import settings
7 from django.core.management import call_command
8 from django.core.management.base import BaseCommand, CommandError
9 from django.test.runner import DiscoverRunner
10
11
12 class RenderPagesRunner(DiscoverRunner):
13 """Test runner which only includes `render_pages.*` methods.
14 The actual logic of the page rendering is implemented in the `@render_pages` decorator."""
15
16 test_loader = unittest.TestLoader()
17
18 def __init__(self, **kwargs):
19 super().__init__(**kwargs)
20 self.test_loader.testMethodPrefix = "render_pages"
21
22
23 class Command(BaseCommand):
24 def add_arguments(self, parser: argparse.ArgumentParser):
25 subparsers = parser.add_subparsers(dest="command", required=True)
26 compile_parser = subparsers.add_parser("compile")
27 compile_parser.add_argument(
28 "--watch",
29 action="store_true",
30 help="Watch scripts and recompile when they change.",
31 )
32 self.add_fresh_argument(compile_parser)
33 test_parser = subparsers.add_parser("test")
34 self.add_fresh_argument(test_parser)
35 subparsers.add_parser("render_pages")
36
37 @staticmethod
38 def add_fresh_argument(parser: argparse.ArgumentParser):
39 parser.add_argument(
40 "--fresh",
41 action="store_true",
42 help="Delete .tsbuildinfo.json before compilation to force a fresh compilation."
43 "This is useful when incremental compilation does not yield the expected output.",
44 )
45
46 def handle(self, *args, **options):
47 if options["command"] == "compile":
48 self.compile(**options)
49 elif options["command"] == "test":
50 self.test(**options)
51 elif options["command"] == "render_pages":
52 self.render_pages(**options)
53
54 def run_command(self, command):
55 try:
56 subprocess.run(command, check=True) # nosec
57 except FileNotFoundError:
58 print(f"Could not find {command[0]} command", file=self.stderr)
59 except KeyboardInterrupt:
60 pass
61 except subprocess.CalledProcessError as e:
62 raise CommandError("Error during command execution", returncode=e.returncode) from e
63
64 def compile(self, watch=False, fresh=False, **_options):
65 static_directory = settings.STATICFILES_DIRS[0]
66 command = [
67 "npx",
68 "tsc",
69 "--project",
70 os.path.join(static_directory, "ts", "tsconfig.compile.json"),
71 ]
72
73 if watch:
74 command += ["--watch"]
75
76 if fresh:
77 try:
78 os.remove(os.path.join(static_directory, "ts", ".tsbuildinfo.json"))
79 except FileNotFoundError:
80 pass
81
82 self.run_command(command)
83
84 def test(self, **options):
85 call_command("scss")
86 self.compile(**options)
87 self.render_pages()
88 self.run_command(["npx", "jest"])
89
90 @staticmethod
91 def render_pages(**_options):
92 # Enable debug mode as otherwise a collectstatic beforehand would be necessary,
93 # as missing static files would result into an error.
94 test_runner = RenderPagesRunner(debug_mode=True)
95 failed_tests = test_runner.run_tests([])
96 if failed_tests > 0:
97 raise CommandError("Failures during render_pages")
98
[end of evap/evaluation/management/commands/ts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/management/commands/scss.py b/evap/evaluation/management/commands/scss.py
--- a/evap/evaluation/management/commands/scss.py
+++ b/evap/evaluation/management/commands/scss.py
@@ -2,7 +2,7 @@
import subprocess # nosec
from django.conf import settings
-from django.core.management.base import BaseCommand
+from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
@@ -36,7 +36,7 @@
try:
subprocess.run(command, check=True) # nosec
- except FileNotFoundError:
- print("Could not find sass command", file=self.stderr)
+ except FileNotFoundError as e:
+ raise CommandError("Could not find sass command") from e
except KeyboardInterrupt:
pass
diff --git a/evap/evaluation/management/commands/ts.py b/evap/evaluation/management/commands/ts.py
--- a/evap/evaluation/management/commands/ts.py
+++ b/evap/evaluation/management/commands/ts.py
@@ -54,8 +54,8 @@
def run_command(self, command):
try:
subprocess.run(command, check=True) # nosec
- except FileNotFoundError:
- print(f"Could not find {command[0]} command", file=self.stderr)
+ except FileNotFoundError as e:
+ raise CommandError(f"Could not find {command[0]} command") from e
except KeyboardInterrupt:
pass
except subprocess.CalledProcessError as e:
| {"golden_diff": "diff --git a/evap/evaluation/management/commands/scss.py b/evap/evaluation/management/commands/scss.py\n--- a/evap/evaluation/management/commands/scss.py\n+++ b/evap/evaluation/management/commands/scss.py\n@@ -2,7 +2,7 @@\n import subprocess # nosec\n \n from django.conf import settings\n-from django.core.management.base import BaseCommand\n+from django.core.management.base import BaseCommand, CommandError\n \n \n class Command(BaseCommand):\n@@ -36,7 +36,7 @@\n \n try:\n subprocess.run(command, check=True) # nosec\n- except FileNotFoundError:\n- print(\"Could not find sass command\", file=self.stderr)\n+ except FileNotFoundError as e:\n+ raise CommandError(\"Could not find sass command\") from e\n except KeyboardInterrupt:\n pass\ndiff --git a/evap/evaluation/management/commands/ts.py b/evap/evaluation/management/commands/ts.py\n--- a/evap/evaluation/management/commands/ts.py\n+++ b/evap/evaluation/management/commands/ts.py\n@@ -54,8 +54,8 @@\n def run_command(self, command):\n try:\n subprocess.run(command, check=True) # nosec\n- except FileNotFoundError:\n- print(f\"Could not find {command[0]} command\", file=self.stderr)\n+ except FileNotFoundError as e:\n+ raise CommandError(f\"Could not find {command[0]} command\") from e\n except KeyboardInterrupt:\n pass\n except subprocess.CalledProcessError as e:\n", "issue": "Investigate Github Action caching\nOne can cache certain directories in github actions. For Python, caching the entire installation is used (including all the `site-packages`), so that wheels don't need to be rebuild. Additionally, the download could be faster. We should investigate how much there is to gain.\n", "before_files": [{"content": "import os\nimport subprocess # nosec\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch stylesheets and recompile when they change.\",\n )\n parser.add_argument(\n \"--production\",\n action=\"store_true\",\n help=\"Compress output stylesheet and do not generate source maps.\"\n \" Intended to use in production deployment.\",\n )\n\n def handle(self, *args, **options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"sass\",\n os.path.join(static_directory, \"scss\", \"evap.scss\"),\n os.path.join(static_directory, \"css\", \"evap.css\"),\n ]\n\n if options[\"watch\"]:\n command += [\"--watch\", \"--poll\"]\n\n if options[\"production\"]:\n command += [\"--style\", \"compressed\", \"--no-source-map\"]\n\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError:\n print(\"Could not find sass command\", file=self.stderr)\n except KeyboardInterrupt:\n pass\n", "path": "evap/evaluation/management/commands/scss.py"}, {"content": "import argparse\nimport os\nimport subprocess # nosec\nimport unittest\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.test.runner import DiscoverRunner\n\n\nclass RenderPagesRunner(DiscoverRunner):\n \"\"\"Test runner which only includes `render_pages.*` methods.\n The actual logic of the page rendering is implemented in the `@render_pages` decorator.\"\"\"\n\n test_loader = unittest.TestLoader()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.test_loader.testMethodPrefix = \"render_pages\"\n\n\nclass Command(BaseCommand):\n 
def add_arguments(self, parser: argparse.ArgumentParser):\n subparsers = parser.add_subparsers(dest=\"command\", required=True)\n compile_parser = subparsers.add_parser(\"compile\")\n compile_parser.add_argument(\n \"--watch\",\n action=\"store_true\",\n help=\"Watch scripts and recompile when they change.\",\n )\n self.add_fresh_argument(compile_parser)\n test_parser = subparsers.add_parser(\"test\")\n self.add_fresh_argument(test_parser)\n subparsers.add_parser(\"render_pages\")\n\n @staticmethod\n def add_fresh_argument(parser: argparse.ArgumentParser):\n parser.add_argument(\n \"--fresh\",\n action=\"store_true\",\n help=\"Delete .tsbuildinfo.json before compilation to force a fresh compilation.\"\n \"This is useful when incremental compilation does not yield the expected output.\",\n )\n\n def handle(self, *args, **options):\n if options[\"command\"] == \"compile\":\n self.compile(**options)\n elif options[\"command\"] == \"test\":\n self.test(**options)\n elif options[\"command\"] == \"render_pages\":\n self.render_pages(**options)\n\n def run_command(self, command):\n try:\n subprocess.run(command, check=True) # nosec\n except FileNotFoundError:\n print(f\"Could not find {command[0]} command\", file=self.stderr)\n except KeyboardInterrupt:\n pass\n except subprocess.CalledProcessError as e:\n raise CommandError(\"Error during command execution\", returncode=e.returncode) from e\n\n def compile(self, watch=False, fresh=False, **_options):\n static_directory = settings.STATICFILES_DIRS[0]\n command = [\n \"npx\",\n \"tsc\",\n \"--project\",\n os.path.join(static_directory, \"ts\", \"tsconfig.compile.json\"),\n ]\n\n if watch:\n command += [\"--watch\"]\n\n if fresh:\n try:\n os.remove(os.path.join(static_directory, \"ts\", \".tsbuildinfo.json\"))\n except FileNotFoundError:\n pass\n\n self.run_command(command)\n\n def test(self, **options):\n call_command(\"scss\")\n self.compile(**options)\n self.render_pages()\n self.run_command([\"npx\", \"jest\"])\n\n @staticmethod\n def render_pages(**_options):\n # Enable debug mode as otherwise a collectstatic beforehand would be necessary,\n # as missing static files would result into an error.\n test_runner = RenderPagesRunner(debug_mode=True)\n failed_tests = test_runner.run_tests([])\n if failed_tests > 0:\n raise CommandError(\"Failures during render_pages\")\n", "path": "evap/evaluation/management/commands/ts.py"}]} | 1,849 | 348 |
gh_patches_debug_3823 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-5701 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Latest release broken on Ubuntu 22.04 - ImportError: libffi.so.7: cannot open shared object file: No such file or directory
#### Problem Description
https://askubuntu.com/questions/1286772/libffi-so-7-cannot-open-shared-object-file-no-such-file-or-directory
I don't think our users should do anything with their system to make the binary run, correct?
```
$ ./mitmdump
Traceback (most recent call last):
File "mitmdump", line 3, in <module>
File "mitmproxy/tools/main.py", line 131, in mitmdump
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "mitmproxy/tools/dump.py", line 1, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "mitmproxy/addons/__init__.py", line 10, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "mitmproxy/addons/cut.py", line 14, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "pyperclip/__init__.py", line 52, in <module>
File "PyInstaller/loader/pyimod02_importers.py", line 499, in exec_module
File "ctypes/__init__.py", line 8, in <module>
ImportError: libffi.so.7: cannot open shared object file: No such file or directory
[50935] Failed to execute script 'mitmdump' due to unhandled exception!
```
#### Steps to reproduce the behavior:
1. `mitmdump`
Repros in a clean `docker run --rm -it ubuntu:22.04` as well
#### System Information
9.0.0 just downloaded (I cannot run `mitmdump --version` as it doesn't launch)
</issue>
<code>
[start of setup.py]
1 import os
2 import re
3 from codecs import open
4
5 from setuptools import find_packages, setup
6
7 # Based on https://github.com/pypa/sampleproject/blob/main/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14 long_description_content_type = "text/markdown"
15
16 with open(os.path.join(here, "mitmproxy/version.py")) as f:
17 match = re.search(r'VERSION = "(.+?)"', f.read())
18 assert match
19 VERSION = match.group(1)
20
21 setup(
22 name="mitmproxy",
23 version=VERSION,
24 description="An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.",
25 long_description=long_description,
26 long_description_content_type=long_description_content_type,
27 url="http://mitmproxy.org",
28 author="Aldo Cortesi",
29 author_email="[email protected]",
30 license="MIT",
31 classifiers=[
32 "License :: OSI Approved :: MIT License",
33 "Development Status :: 5 - Production/Stable",
34 "Environment :: Console :: Curses",
35 "Operating System :: MacOS",
36 "Operating System :: POSIX",
37 "Operating System :: Microsoft :: Windows",
38 "Programming Language :: Python :: 3 :: Only",
39 "Programming Language :: Python :: 3.9",
40 "Programming Language :: Python :: 3.10",
41 "Programming Language :: Python :: 3.11",
42 "Programming Language :: Python :: Implementation :: CPython",
43 "Topic :: Security",
44 "Topic :: Internet :: WWW/HTTP",
45 "Topic :: Internet :: Proxy Servers",
46 "Topic :: System :: Networking :: Monitoring",
47 "Topic :: Software Development :: Testing",
48 "Typing :: Typed",
49 ],
50 project_urls={
51 "Documentation": "https://docs.mitmproxy.org/stable/",
52 "Source": "https://github.com/mitmproxy/mitmproxy/",
53 "Tracker": "https://github.com/mitmproxy/mitmproxy/issues",
54 },
55 packages=find_packages(
56 include=[
57 "mitmproxy",
58 "mitmproxy.*",
59 ]
60 ),
61 include_package_data=True,
62 entry_points={
63 "console_scripts": [
64 "mitmproxy = mitmproxy.tools.main:mitmproxy",
65 "mitmdump = mitmproxy.tools.main:mitmdump",
66 "mitmweb = mitmproxy.tools.main:mitmweb",
67 ],
68 "pyinstaller40": [
69 "hook-dirs = mitmproxy.utils.pyinstaller:hook_dirs",
70 ]
71 },
72 python_requires=">=3.9",
73 # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires
74 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
75 install_requires=[
76 "asgiref>=3.2.10,<3.6",
77 "Brotli>=1.0,<1.1",
78 "certifi>=2019.9.11", # no semver here - this should always be on the last release!
79 "cryptography>=38.0,<38.1",
80 "flask>=1.1.1,<2.3",
81 "h11>=0.11,<0.15",
82 "h2>=4.1,<5",
83 "hyperframe>=6.0,<7",
84 "kaitaistruct>=0.10,<0.11",
85 "ldap3>=2.8,<2.10",
86 "mitmproxy_wireguard>=0.1.6,<0.2",
87 "msgpack>=1.0.0, <1.1.0",
88 "passlib>=1.6.5, <1.8",
89 "protobuf>=3.14,<5",
90 "pyOpenSSL>=22.1,<22.2",
91 "pyparsing>=2.4.2,<3.1",
92 "pyperclip>=1.6.0,<1.9",
93 "ruamel.yaml>=0.16,<0.18",
94 "sortedcontainers>=2.3,<2.5",
95 "tornado>=6.1,<7",
96 "urwid>=2.1.1,<2.2",
97 "wsproto>=1.0,<1.3",
98 "publicsuffix2>=2.20190812,<3",
99 "zstandard>=0.11,<0.20",
100 "typing-extensions>=4.3,<4.5; python_version<'3.10'",
101 ],
102 extras_require={
103 ':sys_platform == "win32"': [
104 "pydivert>=2.0.3,<2.2",
105 ],
106 "dev": [
107 "click>=7.0,<8.2",
108 "hypothesis>=5.8,<7",
109 "parver>=0.1,<2.0",
110 "pdoc>=4.0.0",
111 "pyinstaller==5.6.1",
112 "pytest-asyncio>=0.17,<0.21",
113 "pytest-cov>=2.7.1,<4.1",
114 "pytest-timeout>=1.3.3,<2.2",
115 "pytest-xdist>=2.1.0,<3.1",
116 "pytest>=6.1.0,<8",
117 "requests>=2.9.1,<3",
118 "tox>=3.5,<4",
119 "wheel>=0.36.2,<0.39",
120 ],
121 },
122 )
123
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -108,7 +108,7 @@
"hypothesis>=5.8,<7",
"parver>=0.1,<2.0",
"pdoc>=4.0.0",
- "pyinstaller==5.6.1",
+ "pyinstaller==5.5",
"pytest-asyncio>=0.17,<0.21",
"pytest-cov>=2.7.1,<4.1",
"pytest-timeout>=1.3.3,<2.2",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -108,7 +108,7 @@\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n- \"pyinstaller==5.6.1\",\n+ \"pyinstaller==5.5\",\n \"pytest-asyncio>=0.17,<0.21\",\n \"pytest-cov>=2.7.1,<4.1\",\n \"pytest-timeout>=1.3.3,<2.2\",\n", "issue": "Latest release broken on Ubuntu 22.04 - ImportError: libffi.so.7: cannot open shared object file: No such file or directory\n#### Problem Description\r\n\r\nhttps://askubuntu.com/questions/1286772/libffi-so-7-cannot-open-shared-object-file-no-such-file-or-directory\r\n\r\nI don't think our users should do anything with their system to make the binary run, correct?\r\n\r\n```\r\n$ ./mitmdump \r\nTraceback (most recent call last):\r\n File \"mitmdump\", line 3, in <module>\r\n File \"mitmproxy/tools/main.py\", line 131, in mitmdump\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"mitmproxy/tools/dump.py\", line 1, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"mitmproxy/addons/__init__.py\", line 10, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"mitmproxy/addons/cut.py\", line 14, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"pyperclip/__init__.py\", line 52, in <module>\r\n File \"PyInstaller/loader/pyimod02_importers.py\", line 499, in exec_module\r\n File \"ctypes/__init__.py\", line 8, in <module>\r\nImportError: libffi.so.7: cannot open shared object file: No such file or directory\r\n[50935] Failed to execute script 'mitmdump' due to unhandled exception!\r\n```\r\n\r\n#### Steps to reproduce the behavior:\r\n\r\n1. 
`mitmdump`\r\n\r\nRepros in a clean `docker run --rm -it ubuntu:22.04` as well\r\n\r\n#### System Information\r\n\r\n9.0.0 just downloaded (I cannot run `mitmdump --version` as it doesn't launch)\r\n\n", "before_files": [{"content": "import os\nimport re\nfrom codecs import open\n\nfrom setuptools import find_packages, setup\n\n# Based on https://github.com/pypa/sampleproject/blob/main/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\nlong_description_content_type = \"text/markdown\"\n\nwith open(os.path.join(here, \"mitmproxy/version.py\")) as f:\n match = re.search(r'VERSION = \"(.+?)\"', f.read())\n assert match\n VERSION = match.group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL/TLS-capable intercepting proxy for HTTP/1, HTTP/2, and WebSockets.\",\n long_description=long_description,\n long_description_content_type=long_description_content_type,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: System :: Networking :: Monitoring\",\n \"Topic :: Software Development :: Testing\",\n \"Typing :: Typed\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.mitmproxy.org/stable/\",\n \"Source\": \"https://github.com/mitmproxy/mitmproxy/\",\n \"Tracker\": \"https://github.com/mitmproxy/mitmproxy/issues\",\n },\n packages=find_packages(\n include=[\n \"mitmproxy\",\n \"mitmproxy.*\",\n ]\n ),\n include_package_data=True,\n entry_points={\n \"console_scripts\": [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n ],\n \"pyinstaller40\": [\n \"hook-dirs = mitmproxy.utils.pyinstaller:hook_dirs\",\n ]\n },\n python_requires=\">=3.9\",\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"asgiref>=3.2.10,<3.6\",\n \"Brotli>=1.0,<1.1\",\n \"certifi>=2019.9.11\", # no semver here - this should always be on the last release!\n \"cryptography>=38.0,<38.1\",\n \"flask>=1.1.1,<2.3\",\n \"h11>=0.11,<0.15\",\n \"h2>=4.1,<5\",\n \"hyperframe>=6.0,<7\",\n \"kaitaistruct>=0.10,<0.11\",\n \"ldap3>=2.8,<2.10\",\n \"mitmproxy_wireguard>=0.1.6,<0.2\",\n \"msgpack>=1.0.0, <1.1.0\",\n \"passlib>=1.6.5, <1.8\",\n \"protobuf>=3.14,<5\",\n \"pyOpenSSL>=22.1,<22.2\",\n \"pyparsing>=2.4.2,<3.1\",\n \"pyperclip>=1.6.0,<1.9\",\n \"ruamel.yaml>=0.16,<0.18\",\n \"sortedcontainers>=2.3,<2.5\",\n \"tornado>=6.1,<7\",\n \"urwid>=2.1.1,<2.2\",\n \"wsproto>=1.0,<1.3\",\n \"publicsuffix2>=2.20190812,<3\",\n \"zstandard>=0.11,<0.20\",\n 
\"typing-extensions>=4.3,<4.5; python_version<'3.10'\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n \"dev\": [\n \"click>=7.0,<8.2\",\n \"hypothesis>=5.8,<7\",\n \"parver>=0.1,<2.0\",\n \"pdoc>=4.0.0\",\n \"pyinstaller==5.6.1\",\n \"pytest-asyncio>=0.17,<0.21\",\n \"pytest-cov>=2.7.1,<4.1\",\n \"pytest-timeout>=1.3.3,<2.2\",\n \"pytest-xdist>=2.1.0,<3.1\",\n \"pytest>=6.1.0,<8\",\n \"requests>=2.9.1,<3\",\n \"tox>=3.5,<4\",\n \"wheel>=0.36.2,<0.39\",\n ],\n },\n)\n", "path": "setup.py"}]} | 2,536 | 141 |
gh_patches_debug_34206 | rasdani/github-patches | git_diff | kymatio__kymatio-244 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RELEASE requirements?
Hi,
When doing:
```
conda install pytorch torchvision -c pytorch
pip install -i https://test.pypi.org/simple/ kymatio
```
then cloning the corresponding release, and doing `pytest`, I have tons of errors that are evidence that none of the requirements.txt packages are installed. Actually, this makes me think we should have a pip package for the GPU version (like `pip install kymatio-gpu`)?
https://github.com/kymatio/kymatio/blob/master/requirements.txt
</issue>
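Editor's note: a small, hypothetical sanity check (not from the issue) for verifying whether the declared dependencies actually landed in the environment after the install; the package names below are placeholders, since requirements.txt is not reproduced in this record.

```python
# Placeholder names only -- substitute the real entries from requirements.txt.
import importlib.util

candidates = ["numpy", "scipy", "torch"]
missing = [pkg for pkg in candidates if importlib.util.find_spec(pkg) is None]
print("missing after install:", missing or "none")
```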
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import csv
3 import importlib
4 import os
5 import shutil
6 import sys
7 from setuptools import setup, find_packages
8
9 # Constants
10 DISTNAME = 'kymatio'
11 DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'
12 URL = 'https://kymatio.github.io'
13 LICENSE = 'BSD-3-Clause'
14
15
16 # Parse description
17 with open('README.md') as f:
18 README = f.read().split('\n')
19 LONG_DESCRIPTION = '\n'.join([x for x in README if not x[:3]=='[!['])
20
21
22 # Parse version.py
23 kymatio_version_spec = importlib.util.spec_from_file_location(
24 'kymatio_version', 'kymatio/version.py')
25 kymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)
26 kymatio_version_spec.loader.exec_module(kymatio_version_module)
27 VERSION = kymatio_version_module.version
28
29
30 # Parse requirements.txt
31 with open('requirements.txt', 'r') as f:
32 REQUIREMENTS = f.read().split('\n')
33
34
35 setup_info = dict(
36 # Metadata
37 name=DISTNAME,
38 version=VERSION,
39 author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '
40 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '
41 'Louis Thiry, Vincent Lostanlen, Joakim Andén, '
42 'Tomás Angles, Gabriel Huang, Roberto Leonarduzzi'),
43 author_email=('[email protected], [email protected], '
44 '[email protected], [email protected], '
45 '[email protected], [email protected], '
46 '[email protected], [email protected], [email protected], '
47 '[email protected], [email protected], [email protected]'),
48 url=URL,
49 download_url='https://github.com/kymatio/kymatio/releases',
50 classifiers=['Intended Audience :: Education',
51 'Intended Audience :: Science/Research',
52 'License :: OSI Approved :: BSD License',
53 'Natural Language :: English',
54 'Operating System :: MacOS',
55 'Operating System :: Microsoft :: Windows',
56 'Operating System :: POSIX :: Linux',
57 'Programming Language :: Python :: 3.4',
58 'Programming Language :: Python :: 3.5',
59 'Programming Language :: Python :: 3.6',
60 'Programming Language :: Python :: 3.7',
61 'Programming Language :: Python :: 3.8',
62 'Topic :: Multimedia :: Graphics :: 3D Modeling',
63 'Topic :: Multimedia :: Sound/Audio :: Analysis',
64 'Topic :: Scientific/Engineering :: Artificial Intelligence',
65 'Topic :: Scientific/Engineering :: Chemistry',
66 'Topic :: Scientific/Engineering :: Image Recognition',
67 'Topic :: Scientific/Engineering :: Information Analysis',
68 'Topic :: Scientific/Engineering :: Mathematics',
69 'Topic :: Scientific/Engineering :: Physics',
70 'Topic :: Software Development :: Libraries :: Python Modules',
71 ],
72 description=DESCRIPTION,
73 long_description=LONG_DESCRIPTION,
74 long_description_content_type='text/markdown',
75 license=LICENSE,
76 packages=find_packages(exclude=('test',)),
77 install_requires=REQUIREMENTS,
78 zip_safe=True,
79 )
80
81 setup(**setup_info)
82
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
# Constants
DISTNAME = 'kymatio'
DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'
-URL = 'https://kymatio.github.io'
+URL = 'https://www.kymat.io'
LICENSE = 'BSD-3-Clause'
@@ -47,15 +47,18 @@
'[email protected], [email protected], [email protected]'),
url=URL,
download_url='https://github.com/kymatio/kymatio/releases',
+ project_urls={
+ 'Documentation': 'https://www.kymat.io/codereference.html',
+ 'Source': 'https://github.com/kymatio/kymatio/',
+ 'Tracker': 'https://github.com/kymatio/kymatio/issues',
+ 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'
+ },
classifiers=['Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS',
- 'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
@@ -72,6 +75,7 @@
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
+ python_requires='>=3.6',
license=LICENSE,
packages=find_packages(exclude=('test',)),
install_requires=REQUIREMENTS,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n # Constants\n DISTNAME = 'kymatio'\n DESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\n-URL = 'https://kymatio.github.io'\n+URL = 'https://www.kymat.io'\n LICENSE = 'BSD-3-Clause'\n \n \n@@ -47,15 +47,18 @@\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n+ project_urls={\n+ 'Documentation': 'https://www.kymat.io/codereference.html',\n+ 'Source': 'https://github.com/kymatio/kymatio/',\n+ 'Tracker': 'https://github.com/kymatio/kymatio/issues',\n+ 'Authors': 'https://github.com/kymatio/kymatio/blob/master/AUTHORS.md'\n+ },\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n- 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n- 'Programming Language :: Python :: 3.4',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n@@ -72,6 +75,7 @@\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n+ python_requires='>=3.6',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n", "issue": "RELEASE requirements?\nHi,\r\n\r\nWhen doing:\r\n\r\n```\r\nconda install pytorch torchvision -c pytorch\r\npip install -i https://test.pypi.org/simple/ kymatio\r\n```\r\n\r\nthen cloning the corresponding release, and doing `pytest`, I have tons of errors that are evidences that none of the requirements.txt packages are installed. 
Actually, this makes me think we should have a pip for the GPU version(like `pip install kymatio-gpu`)?\r\n\r\nhttps://github.com/kymatio/kymatio/blob/master/requirements.txt\n", "before_files": [{"content": "#!/usr/bin/env python\nimport csv\nimport importlib\nimport os\nimport shutil\nimport sys\nfrom setuptools import setup, find_packages\n\n# Constants\nDISTNAME = 'kymatio'\nDESCRIPTION = 'Wavelet scattering transforms in Python with GPU acceleration'\nURL = 'https://kymatio.github.io'\nLICENSE = 'BSD-3-Clause'\n\n\n# Parse description\nwith open('README.md') as f:\n README = f.read().split('\\n')\n LONG_DESCRIPTION = '\\n'.join([x for x in README if not x[:3]=='[!['])\n\n\n# Parse version.py\nkymatio_version_spec = importlib.util.spec_from_file_location(\n 'kymatio_version', 'kymatio/version.py')\nkymatio_version_module = importlib.util.module_from_spec(kymatio_version_spec)\nkymatio_version_spec.loader.exec_module(kymatio_version_module)\nVERSION = kymatio_version_module.version\n\n\n# Parse requirements.txt\nwith open('requirements.txt', 'r') as f:\n REQUIREMENTS = f.read().split('\\n')\n\n\nsetup_info = dict(\n # Metadata\n name=DISTNAME,\n version=VERSION,\n author=('Edouard Oyallon, Eugene Belilovsky, Sergey Zagoruyko, '\n 'Michael Eickenberg, Mathieu Andreux, Georgios Exarchakis, '\n 'Louis Thiry, Vincent Lostanlen, Joakim And\u00e9n, '\n 'Tom\u00e1s Angles, Gabriel Huang, Roberto Leonarduzzi'),\n author_email=('[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], '\n '[email protected], [email protected], [email protected], '\n '[email protected], [email protected], [email protected]'),\n url=URL,\n download_url='https://github.com/kymatio/kymatio/releases',\n classifiers=['Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n 'Topic :: Multimedia :: Sound/Audio :: Analysis',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Chemistry',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n license=LICENSE,\n packages=find_packages(exclude=('test',)),\n install_requires=REQUIREMENTS,\n zip_safe=True,\n)\n\nsetup(**setup_info)\n", "path": "setup.py"}]} | 1,556 | 427 |
gh_patches_debug_6618 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-1687 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
REPL client help is missing some commands
The help for the REPL client is missing some commands from `help`, and instead has `client.{c[0]}`.
```
pymodbus.console tcp
❯ pymodbus.console tcp
----------------------------------------------------------------------------
__________ _____ .___ __________ .__
\______ \___.__. / \ ____ __| _/ \______ \ ____ ______ | |
| ___< | |/ \ / \ / _ \ / __ | | _// __ \\\____ \| |
| | \___ / Y ( <_> ) /_/ | | | \ ___/| |_> > |__
|____| / ____\____|__ /\____/\____ | /\ |____|_ /\___ > __/|____/
\/ \/ \/ \/ \/ \/|__|
v1.3.0 - 3.5.0dev
----------------------------------------------------------------------------
> help
Available commands:
<...>
client.{c[0]}
```
This was caught by a `ruff` rule.
> RUF011 Dictionary comprehension uses static key: `"client.{c[0]}"`
</issue>
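Editor's note: a reduced illustration (not pymodbus code) of the bug class that RUF011 flags — without the `f` prefix the comprehension builds a single literal key that gets overwritten on every iteration.

```python
commands = [("connect", None), ("close", None)]

static = {"client.{c[0]}": c[0] for c in commands}      # literal key, last value wins
formatted = {f"client.{c[0]}": c[0] for c in commands}  # one key per command

print(static)     # {'client.{c[0]}': 'close'}
print(formatted)  # {'client.connect': 'connect', 'client.close': 'close'}
```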
<code>
[start of pymodbus/repl/client/helper.py]
1 """Helper Module for REPL actions."""
2 import inspect
3
4 # pylint: disable=missing-type-doc
5 import json
6 from collections import OrderedDict
7 from typing import Any, Dict, List, Union
8
9 import pygments
10 from prompt_toolkit import print_formatted_text
11 from prompt_toolkit.formatted_text import HTML, PygmentsTokens
12 from pygments.lexers.data import JsonLexer
13
14 from pymodbus.payload import BinaryPayloadDecoder, Endian
15
16
17 predicate = inspect.isfunction
18 argspec = inspect.signature
19
20
21 FORMATTERS = {
22 "int8": "decode_8bit_int",
23 "int16": "decode_16bit_int",
24 "int32": "decode_32bit_int",
25 "int64": "decode_64bit_int",
26 "uint8": "decode_8bit_uint",
27 "uint16": "decode_16bit_uint",
28 "uint32": "decode_32bit_uint",
29 "uint64": "decode_64bit_int",
30 "float16": "decode_16bit_float",
31 "float32": "decode_32bit_float",
32 "float64": "decode_64bit_float",
33 }
34
35
36 DEFAULT_KWARGS = {"slave": "Slave address"}
37
38 OTHER_COMMANDS = {
39 "result.raw": "Show RAW Result",
40 "result.decode": "Decode register response to known formats",
41 }
42 EXCLUDE = ["execute", "recv", "send", "trace", "set_debug"]
43 CLIENT_METHODS = [
44 "connect",
45 "close",
46 "idle_time",
47 "is_socket_open",
48 "get_port",
49 "set_port",
50 "get_stopbits",
51 "set_stopbits",
52 "get_bytesize",
53 "set_bytesize",
54 "get_parity",
55 "set_parity",
56 "get_baudrate",
57 "set_baudrate",
58 "get_timeout",
59 "set_timeout",
60 "get_serial_settings",
61 ]
62 CLIENT_ATTRIBUTES: List[str] = []
63
64
65 class Command:
66 """Class representing Commands to be consumed by Completer."""
67
68 def __init__(self, name, signature, doc, slave=False):
69 """Initialize.
70
71 :param name: Name of the command
72 :param signature: inspect object
73 :param doc: Doc string for the command
74 :param slave: Use slave as additional argument in the command .
75 """
76 self.name = name
77 self.doc = doc.split("\n") if doc else " ".join(name.split("_"))
78 self.help_text = self._create_help()
79 self.param_help = self._create_arg_help()
80 if signature:
81 self._params = signature.parameters
82 self.args = self.create_completion()
83 else:
84 self._params = ""
85
86 if self.name.startswith("client.") and slave:
87 self.args.update(**DEFAULT_KWARGS)
88
89 def _create_help(self):
90 """Create help."""
91 doc = filter(lambda d: d, self.doc)
92 cmd_help = list(
93 filter(
94 lambda x: not x.startswith(":param") and not x.startswith(":return"),
95 doc,
96 )
97 )
98 return " ".join(cmd_help).strip()
99
100 def _create_arg_help(self):
101 """Create arg help."""
102 param_dict = {}
103 params = list(filter(lambda d: d.strip().startswith(":param"), self.doc))
104 for param in params:
105 param, param_help = param.split(":param")[1].strip().split(":")
106 param_dict[param] = param_help
107 return param_dict
108
109 def create_completion(self):
110 """Create command completion meta data.
111
112 :return:
113 """
114 words = {}
115
116 def _create(entry, default):
117 if entry not in ["self", "kwargs"]:
118 if isinstance(default, (int, str)):
119 entry += f"={default}"
120 return entry
121 return None
122
123 for arg in self._params.values():
124 if entry := _create(arg.name, arg.default):
125 entry, meta = self.get_meta(entry)
126 words[entry] = meta
127
128 return words
129
130 def get_completion(self):
131 """Get a list of completions.
132
133 :return:
134 """
135 return self.args.keys()
136
137 def get_meta(self, cmd):
138 """Get Meta info of a given command.
139
140 :param cmd: Name of command.
141 :return: Dict containing meta info.
142 """
143 cmd = cmd.strip()
144 cmd = cmd.split("=")[0].strip()
145 return cmd, self.param_help.get(cmd, "")
146
147 def __str__(self):
148 """Return string representation."""
149 if self.doc:
150 return "Command {:>50}{:<20}".format( # pylint: disable=consider-using-f-string
151 self.name, self.doc
152 )
153 return f"Command {self.name}"
154
155
156 def _get_requests(members):
157 """Get requests."""
158 commands = list(
159 filter(
160 lambda x: (
161 x[0] not in EXCLUDE and x[0] not in CLIENT_METHODS and callable(x[1])
162 ),
163 members,
164 )
165 )
166 commands = {
167 f"client.{c[0]}": Command(
168 f"client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
169 )
170 for c in commands
171 if not c[0].startswith("_")
172 }
173 return commands
174
175
176 def _get_client_methods(members):
177 """Get client methods."""
178 commands = list(
179 filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)
180 )
181 commands = {
182 "client.{c[0]}": Command(
183 "client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
184 )
185 for c in commands
186 if not c[0].startswith("_")
187 }
188 return commands
189
190
191 def _get_client_properties(members):
192 """Get client properties."""
193 global CLIENT_ATTRIBUTES # pylint: disable=global-variable-not-assigned
194 commands = list(filter(lambda x: not callable(x[1]), members))
195 commands = {
196 f"client.{c[0]}": Command(f"client.{c[0]}", None, "Read Only!", slave=False)
197 for c in commands
198 if (not c[0].startswith("_") and isinstance(c[1], (str, int, float)))
199 }
200 CLIENT_ATTRIBUTES.extend(list(commands.keys()))
201 return commands
202
203
204 def get_commands(client):
205 """Retrieve all required methods and attributes.
206
207 Of a client object and convert it to commands.
208
209 :param client: Modbus Client object.
210 :return:
211 """
212 commands = {}
213 members = inspect.getmembers(client)
214 requests = _get_requests(members)
215 client_methods = _get_client_methods(members)
216 client_attr = _get_client_properties(members)
217
218 result_commands = inspect.getmembers(Result, predicate=predicate)
219 result_commands = {
220 f"result.{c[0]}": Command(f"result.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]))
221 for c in result_commands
222 if (not c[0].startswith("_") and c[0] != "print_result")
223 }
224 commands.update(requests)
225 commands.update(client_methods)
226 commands.update(client_attr)
227 commands.update(result_commands)
228 return commands
229
230
231 class Result:
232 """Represent result command."""
233
234 function_code: int = None
235 data: Union[Dict[int, Any], Any] = None
236
237 def __init__(self, result):
238 """Initialize.
239
240 :param result: Response of a modbus command.
241 """
242 if isinstance(result, dict): # Modbus response
243 self.function_code = result.pop("function_code", None)
244 self.data = dict(result)
245 else:
246 self.data = result
247
248 def decode(self, formatters, byte_order="big", word_order="big"):
249 """Decode the register response to known formatters.
250
251 :param formatters: int8/16/32/64, uint8/16/32/64, float32/64
252 :param byte_order: little/big
253 :param word_order: little/big
254 """
255 # Read Holding Registers (3)
256 # Read Input Registers (4)
257 # Read Write Registers (23)
258 if not isinstance(formatters, (list, tuple)):
259 formatters = [formatters]
260
261 if self.function_code not in [3, 4, 23]:
262 print_formatted_text(HTML("<red>Decoder works only for registers!!</red>"))
263 return
264 byte_order = (
265 Endian.Little if byte_order.strip().lower() == "little" else Endian.Big
266 )
267 word_order = (
268 Endian.Little if word_order.strip().lower() == "little" else Endian.Big
269 )
270 decoder = BinaryPayloadDecoder.fromRegisters(
271 self.data.get("registers"), byteorder=byte_order, wordorder=word_order
272 )
273 for formatter in formatters:
274 if not (formatter := FORMATTERS.get(formatter)):
275 print_formatted_text(
276 HTML(f"<red>Invalid Formatter - {formatter}!!</red>")
277 )
278 return
279 decoded = getattr(decoder, formatter)()
280 self.print_result(decoded)
281
282 def raw(self):
283 """Return raw result dict."""
284 self.print_result()
285
286 def _process_dict(self, use_dict):
287 """Process dict."""
288 new_dict = OrderedDict()
289 for k, v_item in use_dict.items():
290 if isinstance(v_item, bytes):
291 v_item = v_item.decode("utf-8")
292 elif isinstance(v_item, dict):
293 v_item = self._process_dict(v_item)
294 elif isinstance(v_item, (list, tuple)):
295 v_item = [
296 v1.decode("utf-8") if isinstance(v1, bytes) else v1 for v1 in v_item
297 ]
298 new_dict[k] = v_item
299 return new_dict
300
301 def print_result(self, data=None):
302 """Print result object pretty.
303
304 :param data: Data to be printed.
305 """
306 data = data or self.data
307 if isinstance(data, dict):
308 data = self._process_dict(data)
309 elif isinstance(data, (list, tuple)):
310 data = [v.decode("utf-8") if isinstance(v, bytes) else v for v in data]
311 elif isinstance(data, bytes):
312 data = data.decode("utf-8")
313 tokens = list(pygments.lex(json.dumps(data, indent=4), lexer=JsonLexer()))
314 print_formatted_text(PygmentsTokens(tokens))
315
[end of pymodbus/repl/client/helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pymodbus/repl/client/helper.py b/pymodbus/repl/client/helper.py
--- a/pymodbus/repl/client/helper.py
+++ b/pymodbus/repl/client/helper.py
@@ -179,8 +179,8 @@
filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)
)
commands = {
- "client.{c[0]}": Command(
- "client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
+ f"client.{c[0]}": Command(
+ f"client.{c[0]}", argspec(c[1]), inspect.getdoc(c[1]), slave=False
)
for c in commands
if not c[0].startswith("_")
| {"golden_diff": "diff --git a/pymodbus/repl/client/helper.py b/pymodbus/repl/client/helper.py\n--- a/pymodbus/repl/client/helper.py\n+++ b/pymodbus/repl/client/helper.py\n@@ -179,8 +179,8 @@\n filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)\n )\n commands = {\n- \"client.{c[0]}\": Command(\n- \"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n+ f\"client.{c[0]}\": Command(\n+ f\"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n", "issue": "REPL client help is missing some commands\nThe help for the REPL client is missing some commands from `help`, and instead has `client.{c[0]}`.\r\n\r\n```\r\npymodbus.console tcp\r\n\u276f pymodbus.console tcp\r\n\r\n----------------------------------------------------------------------------\r\n__________ _____ .___ __________ .__\r\n\\______ \\___.__. / \\ ____ __| _/ \\______ \\ ____ ______ | |\r\n | ___< | |/ \\ / \\ / _ \\ / __ | | _// __ \\\\\\____ \\| |\r\n | | \\___ / Y ( <_> ) /_/ | | | \\ ___/| |_> > |__\r\n |____| / ____\\____|__ /\\____/\\____ | /\\ |____|_ /\\___ > __/|____/\r\n \\/ \\/ \\/ \\/ \\/ \\/|__|\r\n v1.3.0 - 3.5.0dev\r\n----------------------------------------------------------------------------\r\n\r\n> help\r\nAvailable commands:\r\n<...>\r\nclient.{c[0]}\r\n```\r\n\r\nThis was caught by a `ruff` rule.\r\n> RUF011 Dictionary comprehension uses static key: `\"client.{c[0]}\"`\r\n\n", "before_files": [{"content": "\"\"\"Helper Module for REPL actions.\"\"\"\nimport inspect\n\n# pylint: disable=missing-type-doc\nimport json\nfrom collections import OrderedDict\nfrom typing import Any, Dict, List, Union\n\nimport pygments\nfrom prompt_toolkit import print_formatted_text\nfrom prompt_toolkit.formatted_text import HTML, PygmentsTokens\nfrom pygments.lexers.data import JsonLexer\n\nfrom pymodbus.payload import BinaryPayloadDecoder, Endian\n\n\npredicate = inspect.isfunction\nargspec = inspect.signature\n\n\nFORMATTERS = {\n \"int8\": \"decode_8bit_int\",\n \"int16\": \"decode_16bit_int\",\n \"int32\": \"decode_32bit_int\",\n \"int64\": \"decode_64bit_int\",\n \"uint8\": \"decode_8bit_uint\",\n \"uint16\": \"decode_16bit_uint\",\n \"uint32\": \"decode_32bit_uint\",\n \"uint64\": \"decode_64bit_int\",\n \"float16\": \"decode_16bit_float\",\n \"float32\": \"decode_32bit_float\",\n \"float64\": \"decode_64bit_float\",\n}\n\n\nDEFAULT_KWARGS = {\"slave\": \"Slave address\"}\n\nOTHER_COMMANDS = {\n \"result.raw\": \"Show RAW Result\",\n \"result.decode\": \"Decode register response to known formats\",\n}\nEXCLUDE = [\"execute\", \"recv\", \"send\", \"trace\", \"set_debug\"]\nCLIENT_METHODS = [\n \"connect\",\n \"close\",\n \"idle_time\",\n \"is_socket_open\",\n \"get_port\",\n \"set_port\",\n \"get_stopbits\",\n \"set_stopbits\",\n \"get_bytesize\",\n \"set_bytesize\",\n \"get_parity\",\n \"set_parity\",\n \"get_baudrate\",\n \"set_baudrate\",\n \"get_timeout\",\n \"set_timeout\",\n \"get_serial_settings\",\n]\nCLIENT_ATTRIBUTES: List[str] = []\n\n\nclass Command:\n \"\"\"Class representing Commands to be consumed by Completer.\"\"\"\n\n def __init__(self, name, signature, doc, slave=False):\n \"\"\"Initialize.\n\n :param name: Name of the command\n :param signature: inspect object\n :param doc: Doc string for the command\n :param slave: Use slave as additional argument in the command .\n \"\"\"\n self.name = name\n self.doc = doc.split(\"\\n\") if doc else \" \".join(name.split(\"_\"))\n self.help_text = 
self._create_help()\n self.param_help = self._create_arg_help()\n if signature:\n self._params = signature.parameters\n self.args = self.create_completion()\n else:\n self._params = \"\"\n\n if self.name.startswith(\"client.\") and slave:\n self.args.update(**DEFAULT_KWARGS)\n\n def _create_help(self):\n \"\"\"Create help.\"\"\"\n doc = filter(lambda d: d, self.doc)\n cmd_help = list(\n filter(\n lambda x: not x.startswith(\":param\") and not x.startswith(\":return\"),\n doc,\n )\n )\n return \" \".join(cmd_help).strip()\n\n def _create_arg_help(self):\n \"\"\"Create arg help.\"\"\"\n param_dict = {}\n params = list(filter(lambda d: d.strip().startswith(\":param\"), self.doc))\n for param in params:\n param, param_help = param.split(\":param\")[1].strip().split(\":\")\n param_dict[param] = param_help\n return param_dict\n\n def create_completion(self):\n \"\"\"Create command completion meta data.\n\n :return:\n \"\"\"\n words = {}\n\n def _create(entry, default):\n if entry not in [\"self\", \"kwargs\"]:\n if isinstance(default, (int, str)):\n entry += f\"={default}\"\n return entry\n return None\n\n for arg in self._params.values():\n if entry := _create(arg.name, arg.default):\n entry, meta = self.get_meta(entry)\n words[entry] = meta\n\n return words\n\n def get_completion(self):\n \"\"\"Get a list of completions.\n\n :return:\n \"\"\"\n return self.args.keys()\n\n def get_meta(self, cmd):\n \"\"\"Get Meta info of a given command.\n\n :param cmd: Name of command.\n :return: Dict containing meta info.\n \"\"\"\n cmd = cmd.strip()\n cmd = cmd.split(\"=\")[0].strip()\n return cmd, self.param_help.get(cmd, \"\")\n\n def __str__(self):\n \"\"\"Return string representation.\"\"\"\n if self.doc:\n return \"Command {:>50}{:<20}\".format( # pylint: disable=consider-using-f-string\n self.name, self.doc\n )\n return f\"Command {self.name}\"\n\n\ndef _get_requests(members):\n \"\"\"Get requests.\"\"\"\n commands = list(\n filter(\n lambda x: (\n x[0] not in EXCLUDE and x[0] not in CLIENT_METHODS and callable(x[1])\n ),\n members,\n )\n )\n commands = {\n f\"client.{c[0]}\": Command(\n f\"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n }\n return commands\n\n\ndef _get_client_methods(members):\n \"\"\"Get client methods.\"\"\"\n commands = list(\n filter(lambda x: (x[0] not in EXCLUDE and x[0] in CLIENT_METHODS), members)\n )\n commands = {\n \"client.{c[0]}\": Command(\n \"client.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]), slave=False\n )\n for c in commands\n if not c[0].startswith(\"_\")\n }\n return commands\n\n\ndef _get_client_properties(members):\n \"\"\"Get client properties.\"\"\"\n global CLIENT_ATTRIBUTES # pylint: disable=global-variable-not-assigned\n commands = list(filter(lambda x: not callable(x[1]), members))\n commands = {\n f\"client.{c[0]}\": Command(f\"client.{c[0]}\", None, \"Read Only!\", slave=False)\n for c in commands\n if (not c[0].startswith(\"_\") and isinstance(c[1], (str, int, float)))\n }\n CLIENT_ATTRIBUTES.extend(list(commands.keys()))\n return commands\n\n\ndef get_commands(client):\n \"\"\"Retrieve all required methods and attributes.\n\n Of a client object and convert it to commands.\n\n :param client: Modbus Client object.\n :return:\n \"\"\"\n commands = {}\n members = inspect.getmembers(client)\n requests = _get_requests(members)\n client_methods = _get_client_methods(members)\n client_attr = _get_client_properties(members)\n\n result_commands = inspect.getmembers(Result, 
predicate=predicate)\n result_commands = {\n f\"result.{c[0]}\": Command(f\"result.{c[0]}\", argspec(c[1]), inspect.getdoc(c[1]))\n for c in result_commands\n if (not c[0].startswith(\"_\") and c[0] != \"print_result\")\n }\n commands.update(requests)\n commands.update(client_methods)\n commands.update(client_attr)\n commands.update(result_commands)\n return commands\n\n\nclass Result:\n \"\"\"Represent result command.\"\"\"\n\n function_code: int = None\n data: Union[Dict[int, Any], Any] = None\n\n def __init__(self, result):\n \"\"\"Initialize.\n\n :param result: Response of a modbus command.\n \"\"\"\n if isinstance(result, dict): # Modbus response\n self.function_code = result.pop(\"function_code\", None)\n self.data = dict(result)\n else:\n self.data = result\n\n def decode(self, formatters, byte_order=\"big\", word_order=\"big\"):\n \"\"\"Decode the register response to known formatters.\n\n :param formatters: int8/16/32/64, uint8/16/32/64, float32/64\n :param byte_order: little/big\n :param word_order: little/big\n \"\"\"\n # Read Holding Registers (3)\n # Read Input Registers (4)\n # Read Write Registers (23)\n if not isinstance(formatters, (list, tuple)):\n formatters = [formatters]\n\n if self.function_code not in [3, 4, 23]:\n print_formatted_text(HTML(\"<red>Decoder works only for registers!!</red>\"))\n return\n byte_order = (\n Endian.Little if byte_order.strip().lower() == \"little\" else Endian.Big\n )\n word_order = (\n Endian.Little if word_order.strip().lower() == \"little\" else Endian.Big\n )\n decoder = BinaryPayloadDecoder.fromRegisters(\n self.data.get(\"registers\"), byteorder=byte_order, wordorder=word_order\n )\n for formatter in formatters:\n if not (formatter := FORMATTERS.get(formatter)):\n print_formatted_text(\n HTML(f\"<red>Invalid Formatter - {formatter}!!</red>\")\n )\n return\n decoded = getattr(decoder, formatter)()\n self.print_result(decoded)\n\n def raw(self):\n \"\"\"Return raw result dict.\"\"\"\n self.print_result()\n\n def _process_dict(self, use_dict):\n \"\"\"Process dict.\"\"\"\n new_dict = OrderedDict()\n for k, v_item in use_dict.items():\n if isinstance(v_item, bytes):\n v_item = v_item.decode(\"utf-8\")\n elif isinstance(v_item, dict):\n v_item = self._process_dict(v_item)\n elif isinstance(v_item, (list, tuple)):\n v_item = [\n v1.decode(\"utf-8\") if isinstance(v1, bytes) else v1 for v1 in v_item\n ]\n new_dict[k] = v_item\n return new_dict\n\n def print_result(self, data=None):\n \"\"\"Print result object pretty.\n\n :param data: Data to be printed.\n \"\"\"\n data = data or self.data\n if isinstance(data, dict):\n data = self._process_dict(data)\n elif isinstance(data, (list, tuple)):\n data = [v.decode(\"utf-8\") if isinstance(v, bytes) else v for v in data]\n elif isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n tokens = list(pygments.lex(json.dumps(data, indent=4), lexer=JsonLexer()))\n print_formatted_text(PygmentsTokens(tokens))\n", "path": "pymodbus/repl/client/helper.py"}]} | 3,964 | 190 |
gh_patches_debug_27447 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1310 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AZURE_103 not accepting vsts_configuration as valid Git repository
**Describe the bug**
The rule CKV_AZURE_103 only accepts the GitHub configuration as Git source control for an Azure Data Factory instance. However, it is also possible to configure Git source control using the `vsts_configuration` block.
**To Reproduce**
Steps to reproduce the behavior:
Create the following resource
```hcl
resource "azurerm_data_factory" "main" {
# General
name = "dummy-name"
resource_group_name = azurerm_resource_group.primary.name
location = azurerm_resource_group.primary.location
# Azure DevOps
vsts_configuration {
account_name = var.account_name
branch_name = var.branch_name
project_name = var.project_name
repository_name = var.repository_name
root_folder = var.root_folder
tenant_id = data.azurerm_client_config.current.tenant_id
}
}
```
**Expected behavior**
Expected to accept both vsts_configuration and github_configuration as valid Git source control configurations.
**Desktop (please complete the following information):**
- OS: Ubuntu 20.04 LTS
- Checkov Version 2.0.86
</issue>
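Editor's note: a minimal sketch, assuming checkov's usual list-wrapped `conf` layout, of how the check could accept either repository block; it is illustrative only, not the project's final implementation.

```python
from checkov.common.models.enums import CheckResult

def scan_resource_conf(conf):
    # Either git backend satisfies the control as long as a repository is set.
    for block in ("github_configuration", "vsts_configuration"):
        settings = conf.get(block, [{}])[0]
        if settings.get("repository_name"):
            return CheckResult.PASSED
    return CheckResult.FAILED
```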
<code>
[start of checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py]
1 from checkov.common.models.consts import ANY_VALUE
2 from checkov.common.models.enums import CheckCategories
3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
4
5
6 class DataFactoryUsesGitRepository(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure that Azure Data Factory uses Git repository for source control"
9 id = "CKV_AZURE_103"
10 supported_resources = ['azurerm_data_factory']
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def get_inspected_key(self):
15 return "github_configuration/[0]/repository_name"
16
17 def get_expected_value(self):
18 return ANY_VALUE
19
20
21 check = DataFactoryUsesGitRepository()
22
[end of checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py
--- a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py
+++ b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py
@@ -1,21 +1,25 @@
-from checkov.common.models.consts import ANY_VALUE
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+from typing import Dict, List, Any
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
-class DataFactoryUsesGitRepository(BaseResourceValueCheck):
- def __init__(self):
+
+class DataFactoryUsesGitRepository(BaseResourceCheck):
+ def __init__(self) -> None:
name = "Ensure that Azure Data Factory uses Git repository for source control"
id = "CKV_AZURE_103"
- supported_resources = ['azurerm_data_factory']
+ supported_resources = ["azurerm_data_factory"]
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return "github_configuration/[0]/repository_name"
-
- def get_expected_value(self):
- return ANY_VALUE
+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
+ github = conf.get("github_configuration", [{}])[0]
+ if github.get("repository_name"):
+ return CheckResult.PASSED
+ vsts = conf.get("vsts_configuration", [{}])[0]
+ if vsts.get("repository_name"):
+ return CheckResult.PASSED
+ return CheckResult.FAILED
check = DataFactoryUsesGitRepository()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n--- a/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n+++ b/checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py\n@@ -1,21 +1,25 @@\n-from checkov.common.models.consts import ANY_VALUE\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+from typing import Dict, List, Any\n \n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n-class DataFactoryUsesGitRepository(BaseResourceValueCheck):\n- def __init__(self):\n+\n+class DataFactoryUsesGitRepository(BaseResourceCheck):\n+ def __init__(self) -> None:\n name = \"Ensure that Azure Data Factory uses Git repository for source control\"\n id = \"CKV_AZURE_103\"\n- supported_resources = ['azurerm_data_factory']\n+ supported_resources = [\"azurerm_data_factory\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return \"github_configuration/[0]/repository_name\"\n-\n- def get_expected_value(self):\n- return ANY_VALUE\n+ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:\n+ github = conf.get(\"github_configuration\", [{}])[0]\n+ if github.get(\"repository_name\"):\n+ return CheckResult.PASSED\n+ vsts = conf.get(\"vsts_configuration\", [{}])[0]\n+ if vsts.get(\"repository_name\"):\n+ return CheckResult.PASSED\n+ return CheckResult.FAILED\n \n \n check = DataFactoryUsesGitRepository()\n", "issue": "CKV_AZURE_103 not accepting vsts_configuration as valid Git repository\n**Describe the bug**\r\nThe rule CKV_AZURE_103 only accepts the Github configuration for Git source control for an Azure Data Factory instance. However, it is possible to configure a Git source control using the `vsts_configuration`. 
\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nCreate the following resource\r\n```hcl\r\nresource \"azurerm_data_factory\" \"main\" {\r\n\r\n # General\r\n name = \"dummy-name\"\r\n resource_group_name = azurerm_resource_group.primary.name\r\n location = azurerm_resource_group.primary.location\r\n\r\n # Azure DevOps\r\n vsts_configuration {\r\n account_name = var.account_name\r\n branch_name = var.branch_name\r\n project_name = var.project_name\r\n repository_name = var.repository_name\r\n root_folder = var.root_folder\r\n tenant_id = data.azurerm_client_config.current.tenant_id\r\n }\r\n\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nExpected to accept both vsts_configuration and github_configuration as valid Git source control configurations.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 20.04 LTS\r\n - Checkov Version 2.0.86\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass DataFactoryUsesGitRepository(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Azure Data Factory uses Git repository for source control\"\n id = \"CKV_AZURE_103\"\n supported_resources = ['azurerm_data_factory']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"github_configuration/[0]/repository_name\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = DataFactoryUsesGitRepository()\n", "path": "checkov/terraform/checks/resource/azure/DataFactoryUsesGitRepository.py"}]} | 1,036 | 436 |
gh_patches_debug_37919 | rasdani/github-patches | git_diff | google__timesketch-539 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tsctl to support TSV files
`tsctl` currently supports reading CSV files; however, it is often easier, and data is less likely to get broken, when _tab_-delimited files are used instead.
This is trivial to implement, e.g.:
1. add the option `tsv2ts` to [`shell_manager`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L424)
2. in `CreateTimelineFromCsv()`, have an option for _delimiter_ and, if `tsv2ts` was provided, change the CSV [`reader`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L412) delimiter to a tab.
</issue>
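Editor's note: a hedged sketch of a delimiter-aware reader along the lines the issue suggests; the function name, parameter, and `tsv2ts` wiring are assumptions rather than the project's actual API, and the full validation and timestamp logic from `timesketch/lib/utils.py` is elided.

```python
import csv

def read_and_validate_delimited(path, delimiter=','):
    """Yield rows from a CSV or TSV file after a minimal header check."""
    mandatory_fields = ['message', 'datetime', 'timestamp_desc']
    with open(path, 'r') as fh:
        reader = csv.DictReader(fh, delimiter=delimiter)
        missing = [f for f in mandatory_fields if f not in (reader.fieldnames or [])]
        if missing:
            raise RuntimeError('Missing fields in header: {0!s}'.format(missing))
        for row in reader:
            yield row

# A tsv2ts command would simply call read_and_validate_delimited(path, delimiter='\t').
```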
<code>
[start of timesketch/lib/tasks.py]
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Celery task for processing Plaso storage files."""
15
16 import logging
17 import subprocess
18 import traceback
19
20 from flask import current_app
21
22 from timesketch import create_app
23 from timesketch import create_celery_app
24 from timesketch.lib.datastores.elastic import ElasticsearchDataStore
25 from timesketch.lib.utils import read_and_validate_csv
26 from timesketch.lib.utils import read_and_validate_jsonl
27 from timesketch.models import db_session
28 from timesketch.models.sketch import SearchIndex
29 from timesketch.models.sketch import Timeline
30
31 celery = create_celery_app()
32 flask_app = create_app()
33
34
35 def _set_timeline_status(index_name, status, error_msg=None):
36 """Helper function to set status for searchindex and all related timelines.
37
38 Args:
39 index_name: Name of the datastore index.
40 status: Status to set.
41 error_msg: Error message.
42 """
43 # Run within Flask context so we can make database changes
44 with flask_app.app_context():
45 searchindex = SearchIndex.query.filter_by(index_name=index_name).first()
46 timelines = Timeline.query.filter_by(searchindex=searchindex).all()
47
48 # Set status
49 searchindex.set_status(status)
50 for timeline in timelines:
51 timeline.set_status(status)
52 db_session.add(timeline)
53
54 # Update description if there was a failure in ingestion
55 if error_msg and status == u'fail':
56 # TODO: Don't overload the description field.
57 searchindex.description = error_msg
58
59 # Commit changes to database
60 db_session.add(searchindex)
61 db_session.commit()
62
63
64 @celery.task(track_started=True)
65 def run_plaso(source_file_path, timeline_name, index_name, source_type,
66 username=None):
67 """Create a Celery task for processing Plaso storage file.
68
69 Args:
70 source_file_path: Path to plaso storage file.
71 timeline_name: Name of the Timesketch timeline.
72 index_name: Name of the datastore index.
73 source_type: Type of file, csv or jsonl.
74 username: Username of the user who will own the timeline.
75
76 Returns:
77 String with summary of processed events.
78 """
79 # Log information to Celery
80 logging.info(u'Source type: %s', source_type)
81
82 cmd = [
83 u'psort.py', u'-o', u'timesketch', source_file_path, u'--name',
84 timeline_name, u'--status_view', u'none', u'--index', index_name
85 ]
86
87 if username:
88 cmd.append(u'--username')
89 cmd.append(username)
90
91 # Run psort.py
92 try:
93 cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
94 except subprocess.CalledProcessError as e:
95 # Mark the searchindex and timelines as failed and exit the task
96 _set_timeline_status(index_name, status=u'fail', error_msg=e.output)
97 return e.output
98
99 # Mark the searchindex and timelines as ready
100 _set_timeline_status(index_name, status=u'ready')
101
102 return cmd_output
103
104
105 @celery.task(track_started=True)
106 def run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,
107 username=None):
108 """Create a Celery task for processing a CSV or JSONL file.
109
110 Args:
111 source_file_path: Path to CSV or JSONL file.
112 timeline_name: Name of the Timesketch timeline.
113 index_name: Name of the datastore index.
114 source_type: Type of file, csv or jsonl.
115 username: Username of the user who will own the timeline.
116
117 Returns:
118 Dictionary with count of processed events.
119 """
120 event_type = u'generic_event' # Document type for Elasticsearch
121 validators = {
122 u'csv': read_and_validate_csv,
123 u'jsonl': read_and_validate_jsonl
124 }
125 read_and_validate = validators.get(source_type)
126
127 # Log information to Celery
128 logging.info(u'Index name: %s', index_name)
129 logging.info(u'Timeline name: %s', timeline_name)
130 logging.info(u'Source type: %s', source_type)
131 logging.info(u'Document type: %s', event_type)
132 logging.info(u'Owner: %s', username)
133
134 es = ElasticsearchDataStore(
135 host=current_app.config[u'ELASTIC_HOST'],
136 port=current_app.config[u'ELASTIC_PORT'])
137
138 # Reason for the broad exception catch is that we want to capture
139 # all possible errors and exit the task.
140 try:
141 es.create_index(index_name=index_name, doc_type=event_type)
142 for event in read_and_validate(source_file_path):
143 es.import_event(index_name, event_type, event)
144 # Import the remaining events
145 total_events = es.import_event(index_name, event_type)
146 except Exception as e:
147 # Mark the searchindex and timelines as failed and exit the task
148 error_msg = traceback.format_exc(e)
149 _set_timeline_status(index_name, status=u'fail', error_msg=error_msg)
150 logging.error(error_msg)
151 return
152
153 # Set status to ready when done
154 _set_timeline_status(index_name, status=u'ready')
155
156 return {u'Events processed': total_events}
157
[end of timesketch/lib/tasks.py]
[start of timesketch/lib/utils.py]
1 # Copyright 2015 Google Inc. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Common functions and utilities."""
15
16 import colorsys
17 import csv
18 import datetime
19 import json
20 import random
21 import time
22
23 from dateutil import parser
24
25
26 def random_color():
27 """Generates a random color.
28
29 Returns:
30 Color as string in HEX
31 """
32 hue = random.random()
33 golden_ratio_conjugate = (1 + 5**0.5) / 2
34 hue += golden_ratio_conjugate
35 hue %= 1
36 rgb = tuple(int(i * 256) for i in colorsys.hsv_to_rgb(hue, 0.5, 0.95))
37 return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])
38
39
40 def read_and_validate_csv(path):
41 """Generator for reading a CSV file.
42
43 Args:
44 path: Path to the CSV file
45 """
46 # Columns that must be present in the CSV file
47 mandatory_fields = [u'message', u'datetime', u'timestamp_desc']
48
49 with open(path, 'rb') as fh:
50
51 reader = csv.DictReader(fh)
52 csv_header = reader.fieldnames
53 missing_fields = []
54 # Validate the CSV header
55 for field in mandatory_fields:
56 if field not in csv_header:
57 missing_fields.append(field)
58 if missing_fields:
59 raise RuntimeError(
60 u'Missing fields in CSV header: {0:s}'.format(missing_fields))
61 for row in reader:
62 if u'timestamp' not in csv_header and u'datetime' in csv_header:
63 try:
64 parsed_datetime = parser.parse(row[u'datetime'])
65 row[u'timestamp'] = str(
66 int(time.mktime(parsed_datetime.timetuple())))
67 except ValueError:
68 continue
69
70 yield row
71
72
73 def read_and_validate_jsonl(path):
74 """Generator for reading a JSONL (json lines) file.
75
76 Args:
77 path: Path to the JSONL file
78 """
79 # Fields that must be present in each entry of the JSONL file.
80 mandatory_fields = [u'message', u'datetime', u'timestamp_desc']
81 with open(path, 'rb') as fh:
82
83 lineno = 0
84 for line in fh:
85 lineno += 1
86 try:
87 linedict = json.loads(line)
88 ld_keys = linedict.keys()
89 if u'datetime' not in ld_keys and u'timestamp' in ld_keys:
90 epoch = int(str(linedict[u'timestamp'])[:10])
91 dt = datetime.datetime.fromtimestamp(epoch)
92 linedict[u'datetime'] = dt.isoformat()
93 if u'timestamp' not in ld_keys and u'datetime' in ld_keys:
94 linedict[u'timestamp'] = parser.parse(linedict[u'datetime'])
95
96 missing_fields = []
97 for field in mandatory_fields:
98 if field not in linedict.keys():
99 missing_fields.append(field)
100 if missing_fields:
101 raise RuntimeError(
102 u"Missing field(s) at line {0:n}: {1:s}"
103 .format(lineno, missing_fields))
104
105 yield linedict
106
107 except ValueError as e:
108 raise RuntimeError(
109 u"Error parsing JSON at line {0:n}: {1:s}"
110 .format(lineno, e))
111
112
113 def get_validated_indices(indices, sketch_indices):
114 """Exclude any deleted search index references.
115
116 Args:
117 indices: List of indices from the user
118 sketch_indices: List of indices in the sketch
119
120 Returns:
121 Set of indices with those removed that is not in the sketch
122 """
123 exclude = set(indices) - set(sketch_indices)
124 if exclude:
125 indices = [index for index in indices if index not in exclude]
126 return indices
127
[end of timesketch/lib/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py
--- a/timesketch/lib/tasks.py
+++ b/timesketch/lib/tasks.py
@@ -104,7 +104,7 @@
@celery.task(track_started=True)
def run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,
- username=None):
+ delimiter=None, username=None):
"""Create a Celery task for processing a CSV or JSONL file.
Args:
@@ -112,6 +112,7 @@
timeline_name: Name of the Timesketch timeline.
index_name: Name of the datastore index.
source_type: Type of file, csv or jsonl.
+ delimiter: Character used as a field separator
username: Username of the user who will own the timeline.
Returns:
@@ -139,7 +140,7 @@
# all possible errors and exit the task.
try:
es.create_index(index_name=index_name, doc_type=event_type)
- for event in read_and_validate(source_file_path):
+ for event in read_and_validate(source_file_path, delimiter):
es.import_event(index_name, event_type, event)
# Import the remaining events
total_events = es.import_event(index_name, event_type)
diff --git a/timesketch/lib/utils.py b/timesketch/lib/utils.py
--- a/timesketch/lib/utils.py
+++ b/timesketch/lib/utils.py
@@ -37,18 +37,19 @@
return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])
-def read_and_validate_csv(path):
- """Generator for reading a CSV file.
+def read_and_validate_csv(path, delimiter):
+ """Generator for reading a CSV or TSV file.
Args:
- path: Path to the CSV file
+ path: Path to the file
+ delimiter: character used as a field separator
"""
# Columns that must be present in the CSV file
mandatory_fields = [u'message', u'datetime', u'timestamp_desc']
with open(path, 'rb') as fh:
- reader = csv.DictReader(fh)
+ reader = csv.DictReader(fh, delimiter=delimiter.decode('string_escape'))
csv_header = reader.fieldnames
missing_fields = []
# Validate the CSV header
@@ -70,7 +71,7 @@
yield row
-def read_and_validate_jsonl(path):
+def read_and_validate_jsonl(path, _):
"""Generator for reading a JSONL (json lines) file.
Args:
| {"golden_diff": "diff --git a/timesketch/lib/tasks.py b/timesketch/lib/tasks.py\n--- a/timesketch/lib/tasks.py\n+++ b/timesketch/lib/tasks.py\n@@ -104,7 +104,7 @@\n \n @celery.task(track_started=True)\n def run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,\n- username=None):\n+ delimiter=None, username=None):\n \"\"\"Create a Celery task for processing a CSV or JSONL file.\n \n Args:\n@@ -112,6 +112,7 @@\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n+ delimiter: Character used as a field separator\n username: Username of the user who will own the timeline.\n \n Returns:\n@@ -139,7 +140,7 @@\n # all possible errors and exit the task.\n try:\n es.create_index(index_name=index_name, doc_type=event_type)\n- for event in read_and_validate(source_file_path):\n+ for event in read_and_validate(source_file_path, delimiter):\n es.import_event(index_name, event_type, event)\n # Import the remaining events\n total_events = es.import_event(index_name, event_type)\ndiff --git a/timesketch/lib/utils.py b/timesketch/lib/utils.py\n--- a/timesketch/lib/utils.py\n+++ b/timesketch/lib/utils.py\n@@ -37,18 +37,19 @@\n return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])\n \n \n-def read_and_validate_csv(path):\n- \"\"\"Generator for reading a CSV file.\n+def read_and_validate_csv(path, delimiter):\n+ \"\"\"Generator for reading a CSV or TSV file.\n \n Args:\n- path: Path to the CSV file\n+ path: Path to the file\n+ delimiter: character used as a field separator\n \"\"\"\n # Columns that must be present in the CSV file\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n \n with open(path, 'rb') as fh:\n \n- reader = csv.DictReader(fh)\n+ reader = csv.DictReader(fh, delimiter=delimiter.decode('string_escape'))\n csv_header = reader.fieldnames\n missing_fields = []\n # Validate the CSV header\n@@ -70,7 +71,7 @@\n yield row\n \n \n-def read_and_validate_jsonl(path):\n+def read_and_validate_jsonl(path, _):\n \"\"\"Generator for reading a JSONL (json lines) file.\n \n Args:\n", "issue": "tsctl to support TSV files\n`tsctl` currently supports reading CSV files, however, often times its easier/less likely data gets broken by using _tab_ delimited files.\r\n\r\nThis is trivial to implement, e.g - :\r\n1. add the option `tsv2ts` to [`shell_manager`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L424)\r\n2. in `CreateTimelineFromCsv()`have an option for _delimiter_ and if `tsv2ts` was provided, change the CSV [`reader`](https://github.com/hiddenillusion/timesketch/blob/master/tsctl#L412) delimiter to a tab.\n", "before_files": [{"content": "# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Celery task for processing Plaso storage files.\"\"\"\n\nimport logging\nimport subprocess\nimport traceback\n\nfrom flask import current_app\n\nfrom timesketch import create_app\nfrom timesketch import create_celery_app\nfrom timesketch.lib.datastores.elastic import ElasticsearchDataStore\nfrom timesketch.lib.utils import read_and_validate_csv\nfrom timesketch.lib.utils import read_and_validate_jsonl\nfrom timesketch.models import db_session\nfrom timesketch.models.sketch import SearchIndex\nfrom timesketch.models.sketch import Timeline\n\ncelery = create_celery_app()\nflask_app = create_app()\n\n\ndef _set_timeline_status(index_name, status, error_msg=None):\n \"\"\"Helper function to set status for searchindex and all related timelines.\n\n Args:\n index_name: Name of the datastore index.\n status: Status to set.\n error_msg: Error message.\n \"\"\"\n # Run within Flask context so we can make database changes\n with flask_app.app_context():\n searchindex = SearchIndex.query.filter_by(index_name=index_name).first()\n timelines = Timeline.query.filter_by(searchindex=searchindex).all()\n\n # Set status\n searchindex.set_status(status)\n for timeline in timelines:\n timeline.set_status(status)\n db_session.add(timeline)\n\n # Update description if there was a failure in ingestion\n if error_msg and status == u'fail':\n # TODO: Don't overload the description field.\n searchindex.description = error_msg\n\n # Commit changes to database\n db_session.add(searchindex)\n db_session.commit()\n\n\[email protected](track_started=True)\ndef run_plaso(source_file_path, timeline_name, index_name, source_type,\n username=None):\n \"\"\"Create a Celery task for processing Plaso storage file.\n\n Args:\n source_file_path: Path to plaso storage file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n username: Username of the user who will own the timeline.\n\n Returns:\n String with summary of processed events.\n \"\"\"\n # Log information to Celery\n logging.info(u'Source type: %s', source_type)\n\n cmd = [\n u'psort.py', u'-o', u'timesketch', source_file_path, u'--name',\n timeline_name, u'--status_view', u'none', u'--index', index_name\n ]\n\n if username:\n cmd.append(u'--username')\n cmd.append(username)\n\n # Run psort.py\n try:\n cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n # Mark the searchindex and timelines as failed and exit the task\n _set_timeline_status(index_name, status=u'fail', error_msg=e.output)\n return e.output\n\n # Mark the searchindex and timelines as ready\n _set_timeline_status(index_name, status=u'ready')\n\n return cmd_output\n\n\[email protected](track_started=True)\ndef run_csv_jsonl(source_file_path, timeline_name, index_name, source_type,\n username=None):\n \"\"\"Create a Celery task for processing a CSV or JSONL file.\n\n Args:\n 
source_file_path: Path to CSV or JSONL file.\n timeline_name: Name of the Timesketch timeline.\n index_name: Name of the datastore index.\n source_type: Type of file, csv or jsonl.\n username: Username of the user who will own the timeline.\n\n Returns:\n Dictionary with count of processed events.\n \"\"\"\n event_type = u'generic_event' # Document type for Elasticsearch\n validators = {\n u'csv': read_and_validate_csv,\n u'jsonl': read_and_validate_jsonl\n }\n read_and_validate = validators.get(source_type)\n\n # Log information to Celery\n logging.info(u'Index name: %s', index_name)\n logging.info(u'Timeline name: %s', timeline_name)\n logging.info(u'Source type: %s', source_type)\n logging.info(u'Document type: %s', event_type)\n logging.info(u'Owner: %s', username)\n\n es = ElasticsearchDataStore(\n host=current_app.config[u'ELASTIC_HOST'],\n port=current_app.config[u'ELASTIC_PORT'])\n\n # Reason for the broad exception catch is that we want to capture\n # all possible errors and exit the task.\n try:\n es.create_index(index_name=index_name, doc_type=event_type)\n for event in read_and_validate(source_file_path):\n es.import_event(index_name, event_type, event)\n # Import the remaining events\n total_events = es.import_event(index_name, event_type)\n except Exception as e:\n # Mark the searchindex and timelines as failed and exit the task\n error_msg = traceback.format_exc(e)\n _set_timeline_status(index_name, status=u'fail', error_msg=error_msg)\n logging.error(error_msg)\n return\n\n # Set status to ready when done\n _set_timeline_status(index_name, status=u'ready')\n\n return {u'Events processed': total_events}\n", "path": "timesketch/lib/tasks.py"}, {"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Common functions and utilities.\"\"\"\n\nimport colorsys\nimport csv\nimport datetime\nimport json\nimport random\nimport time\n\nfrom dateutil import parser\n\n\ndef random_color():\n \"\"\"Generates a random color.\n\n Returns:\n Color as string in HEX\n \"\"\"\n hue = random.random()\n golden_ratio_conjugate = (1 + 5**0.5) / 2\n hue += golden_ratio_conjugate\n hue %= 1\n rgb = tuple(int(i * 256) for i in colorsys.hsv_to_rgb(hue, 0.5, 0.95))\n return u'{0:02X}{1:02X}{2:02X}'.format(rgb[0], rgb[1], rgb[2])\n\n\ndef read_and_validate_csv(path):\n \"\"\"Generator for reading a CSV file.\n\n Args:\n path: Path to the CSV file\n \"\"\"\n # Columns that must be present in the CSV file\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n\n with open(path, 'rb') as fh:\n\n reader = csv.DictReader(fh)\n csv_header = reader.fieldnames\n missing_fields = []\n # Validate the CSV header\n for field in mandatory_fields:\n if field not in csv_header:\n missing_fields.append(field)\n if missing_fields:\n raise RuntimeError(\n u'Missing fields in CSV header: {0:s}'.format(missing_fields))\n for row in reader:\n if u'timestamp' not in csv_header and u'datetime' in csv_header:\n try:\n parsed_datetime = 
parser.parse(row[u'datetime'])\n row[u'timestamp'] = str(\n int(time.mktime(parsed_datetime.timetuple())))\n except ValueError:\n continue\n\n yield row\n\n\ndef read_and_validate_jsonl(path):\n \"\"\"Generator for reading a JSONL (json lines) file.\n\n Args:\n path: Path to the JSONL file\n \"\"\"\n # Fields that must be present in each entry of the JSONL file.\n mandatory_fields = [u'message', u'datetime', u'timestamp_desc']\n with open(path, 'rb') as fh:\n\n lineno = 0\n for line in fh:\n lineno += 1\n try:\n linedict = json.loads(line)\n ld_keys = linedict.keys()\n if u'datetime' not in ld_keys and u'timestamp' in ld_keys:\n epoch = int(str(linedict[u'timestamp'])[:10])\n dt = datetime.datetime.fromtimestamp(epoch)\n linedict[u'datetime'] = dt.isoformat()\n if u'timestamp' not in ld_keys and u'datetime' in ld_keys:\n linedict[u'timestamp'] = parser.parse(linedict[u'datetime'])\n\n missing_fields = []\n for field in mandatory_fields:\n if field not in linedict.keys():\n missing_fields.append(field)\n if missing_fields:\n raise RuntimeError(\n u\"Missing field(s) at line {0:n}: {1:s}\"\n .format(lineno, missing_fields))\n\n yield linedict\n\n except ValueError as e:\n raise RuntimeError(\n u\"Error parsing JSON at line {0:n}: {1:s}\"\n .format(lineno, e))\n\n\ndef get_validated_indices(indices, sketch_indices):\n \"\"\"Exclude any deleted search index references.\n\n Args:\n indices: List of indices from the user\n sketch_indices: List of indices in the sketch\n\n Returns:\n Set of indices with those removed that is not in the sketch\n \"\"\"\n exclude = set(indices) - set(sketch_indices)\n if exclude:\n indices = [index for index in indices if index not in exclude]\n return indices\n", "path": "timesketch/lib/utils.py"}]} | 3,552 | 599 |
gh_patches_debug_34807 | rasdani/github-patches | git_diff | conan-io__conan-4611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecation message for `cppinfo.cppflags`
Introduce a new alias of `cppflags` named `cxxflags` and change the docs. If possible, show a deprecation message when cppflags is used.
The choice of `cppflags` meaning `c++flags` was not good, because `cppflags` means preprocessor flags and it is confusing.
</issue>
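A note for orientation before the source listing below: the change requested here amounts to renaming the attribute while keeping the old spelling usable for a transition period. The snippet that follows is a minimal, hypothetical sketch of that pattern — the class name is invented and the warning text is illustrative, not the actual Conan implementation — using only the standard-library `warnings` module:

```python
import warnings


class _CppInfoSketch(object):
    """Illustrative stand-in for a build-info holder with a renamed flag list."""

    def __init__(self):
        self.cxxflags = []  # new, unambiguous name for C++ compiler flags

    @property
    def cppflags(self):
        # Old spelling kept as a read alias; warn so callers can migrate.
        warnings.warn("'cppflags' is deprecated, use 'cxxflags' instead",
                      DeprecationWarning, stacklevel=2)
        return self.cxxflags

    @cppflags.setter
    def cppflags(self, value):
        # Writes through the old spelling update the new attribute.
        warnings.warn("'cppflags' is deprecated, use 'cxxflags' instead",
                      DeprecationWarning, stacklevel=2)
        self.cxxflags = value
```

With a shim of this shape, any recipe that still reads or assigns `cppflags` keeps working but emits a `DeprecationWarning` pointing at `cxxflags`.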
<code>
[start of conans/model/build_info.py]
1 import os
2 from collections import OrderedDict
3
4
5 DEFAULT_INCLUDE = "include"
6 DEFAULT_LIB = "lib"
7 DEFAULT_BIN = "bin"
8 DEFAULT_RES = "res"
9 DEFAULT_SHARE = "share"
10
11
12 class _CppInfo(object):
13 """ Object that stores all the necessary information to build in C/C++.
14 It is intended to be system independent, translation to
15 specific systems will be produced from this info
16 """
17 def __init__(self):
18 self.includedirs = [] # Ordered list of include paths
19 self.srcdirs = [] # Ordered list of source paths
20 self.libdirs = [] # Directories to find libraries
21 self.resdirs = [] # Directories to find resources, data, etc
22 self.bindirs = [] # Directories to find executables and shared libs
23 self.builddirs = []
24 self.rootpaths = []
25 self.libs = [] # The libs to link against
26 self.defines = [] # preprocessor definitions
27 self.cflags = [] # pure C flags
28 self.cppflags = [] # C++ compilation flags
29 self.sharedlinkflags = [] # linker flags
30 self.exelinkflags = [] # linker flags
31 self.rootpath = ""
32 self.sysroot = ""
33 self._include_paths = None
34 self._lib_paths = None
35 self._bin_paths = None
36 self._build_paths = None
37 self._res_paths = None
38 self._src_paths = None
39 self.version = None # Version of the conan package
40 self.description = None # Description of the conan package
41 # When package is editable, filter_empty=False, so empty dirs are maintained
42 self.filter_empty = True
43
44 def _filter_paths(self, paths):
45 abs_paths = [os.path.join(self.rootpath, p)
46 if not os.path.isabs(p) else p for p in paths]
47 if self.filter_empty:
48 return [p for p in abs_paths if os.path.isdir(p)]
49 else:
50 return abs_paths
51
52 @property
53 def include_paths(self):
54 if self._include_paths is None:
55 self._include_paths = self._filter_paths(self.includedirs)
56 return self._include_paths
57
58 @property
59 def lib_paths(self):
60 if self._lib_paths is None:
61 self._lib_paths = self._filter_paths(self.libdirs)
62 return self._lib_paths
63
64 @property
65 def src_paths(self):
66 if self._src_paths is None:
67 self._src_paths = self._filter_paths(self.srcdirs)
68 return self._src_paths
69
70 @property
71 def bin_paths(self):
72 if self._bin_paths is None:
73 self._bin_paths = self._filter_paths(self.bindirs)
74 return self._bin_paths
75
76 @property
77 def build_paths(self):
78 if self._build_paths is None:
79 self._build_paths = self._filter_paths(self.builddirs)
80 return self._build_paths
81
82 @property
83 def res_paths(self):
84 if self._res_paths is None:
85 self._res_paths = self._filter_paths(self.resdirs)
86 return self._res_paths
87
88
89 class CppInfo(_CppInfo):
90 """ Build Information declared to be used by the CONSUMERS of a
91 conans. That means that consumers must use this flags and configs i order
92 to build properly.
93 Defined in user CONANFILE, directories are relative at user definition time
94 """
95 def __init__(self, root_folder):
96 super(CppInfo, self).__init__()
97 self.rootpath = root_folder # the full path of the package in which the conans is found
98 self.includedirs.append(DEFAULT_INCLUDE)
99 self.libdirs.append(DEFAULT_LIB)
100 self.bindirs.append(DEFAULT_BIN)
101 self.resdirs.append(DEFAULT_RES)
102 self.builddirs.append("")
103 # public_deps is needed to accumulate list of deps for cmake targets
104 self.public_deps = []
105 self.configs = {}
106
107 def __getattr__(self, config):
108
109 def _get_cpp_info():
110 result = _CppInfo()
111 result.rootpath = self.rootpath
112 result.sysroot = self.sysroot
113 result.includedirs.append(DEFAULT_INCLUDE)
114 result.libdirs.append(DEFAULT_LIB)
115 result.bindirs.append(DEFAULT_BIN)
116 result.resdirs.append(DEFAULT_RES)
117 result.builddirs.append("")
118 return result
119
120 return self.configs.setdefault(config, _get_cpp_info())
121
122
123 class _BaseDepsCppInfo(_CppInfo):
124 def __init__(self):
125 super(_BaseDepsCppInfo, self).__init__()
126
127 def update(self, dep_cpp_info):
128
129 def merge_lists(seq1, seq2):
130 return [s for s in seq1 if s not in seq2] + seq2
131
132 self.includedirs = merge_lists(self.includedirs, dep_cpp_info.include_paths)
133 self.srcdirs = merge_lists(self.srcdirs, dep_cpp_info.src_paths)
134 self.libdirs = merge_lists(self.libdirs, dep_cpp_info.lib_paths)
135 self.bindirs = merge_lists(self.bindirs, dep_cpp_info.bin_paths)
136 self.resdirs = merge_lists(self.resdirs, dep_cpp_info.res_paths)
137 self.builddirs = merge_lists(self.builddirs, dep_cpp_info.build_paths)
138 self.libs = merge_lists(self.libs, dep_cpp_info.libs)
139 self.rootpaths.append(dep_cpp_info.rootpath)
140
141 # Note these are in reverse order
142 self.defines = merge_lists(dep_cpp_info.defines, self.defines)
143 self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)
144 self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)
145 self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)
146 self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)
147
148 if not self.sysroot:
149 self.sysroot = dep_cpp_info.sysroot
150
151 @property
152 def include_paths(self):
153 return self.includedirs
154
155 @property
156 def lib_paths(self):
157 return self.libdirs
158
159 @property
160 def src_paths(self):
161 return self.srcdirs
162
163 @property
164 def bin_paths(self):
165 return self.bindirs
166
167 @property
168 def build_paths(self):
169 return self.builddirs
170
171 @property
172 def res_paths(self):
173 return self.resdirs
174
175
176 class DepsCppInfo(_BaseDepsCppInfo):
177 """ Build Information necessary to build a given conans. It contains the
178 flags, directories and options if its dependencies. The conans CONANFILE
179 should use these flags to pass them to the underlaying build system (Cmake, make),
180 so deps info is managed
181 """
182
183 def __init__(self):
184 super(DepsCppInfo, self).__init__()
185 self._dependencies = OrderedDict()
186 self.configs = {}
187
188 def __getattr__(self, config):
189 return self.configs.setdefault(config, _BaseDepsCppInfo())
190
191 @property
192 def dependencies(self):
193 return self._dependencies.items()
194
195 @property
196 def deps(self):
197 return self._dependencies.keys()
198
199 def __getitem__(self, item):
200 return self._dependencies[item]
201
202 def update(self, dep_cpp_info, pkg_name):
203 assert isinstance(dep_cpp_info, CppInfo)
204 self._dependencies[pkg_name] = dep_cpp_info
205 super(DepsCppInfo, self).update(dep_cpp_info)
206 for config, cpp_info in dep_cpp_info.configs.items():
207 self.configs.setdefault(config, _BaseDepsCppInfo()).update(cpp_info)
208
209 def update_deps_cpp_info(self, dep_cpp_info):
210 assert isinstance(dep_cpp_info, DepsCppInfo)
211 for pkg_name, cpp_info in dep_cpp_info.dependencies:
212 self.update(cpp_info, pkg_name)
213
[end of conans/model/build_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/model/build_info.py b/conans/model/build_info.py
--- a/conans/model/build_info.py
+++ b/conans/model/build_info.py
@@ -1,6 +1,7 @@
import os
from collections import OrderedDict
+import deprecation
DEFAULT_INCLUDE = "include"
DEFAULT_LIB = "lib"
@@ -25,7 +26,7 @@
self.libs = [] # The libs to link against
self.defines = [] # preprocessor definitions
self.cflags = [] # pure C flags
- self.cppflags = [] # C++ compilation flags
+ self.cxxflags = [] # C++ compilation flags
self.sharedlinkflags = [] # linker flags
self.exelinkflags = [] # linker flags
self.rootpath = ""
@@ -85,6 +86,17 @@
self._res_paths = self._filter_paths(self.resdirs)
return self._res_paths
+ # Compatibility for 'cppflags' (old style property to allow decoration)
+ @deprecation.deprecated(deprecated_in="1.13", removed_in="2.0", details="Use 'cxxflags' instead")
+ def get_cppflags(self):
+ return self.cxxflags
+
+ @deprecation.deprecated(deprecated_in="1.13", removed_in="2.0", details="Use 'cxxflags' instead")
+ def set_cppflags(self, value):
+ self.cxxflags = value
+
+ cppflags = property(get_cppflags, set_cppflags)
+
class CppInfo(_CppInfo):
""" Build Information declared to be used by the CONSUMERS of a
@@ -140,7 +152,7 @@
# Note these are in reverse order
self.defines = merge_lists(dep_cpp_info.defines, self.defines)
- self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)
+ self.cxxflags = merge_lists(dep_cpp_info.cxxflags, self.cxxflags)
self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)
self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)
self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)
| {"golden_diff": "diff --git a/conans/model/build_info.py b/conans/model/build_info.py\n--- a/conans/model/build_info.py\n+++ b/conans/model/build_info.py\n@@ -1,6 +1,7 @@\n import os\n from collections import OrderedDict\n \n+import deprecation\n \n DEFAULT_INCLUDE = \"include\"\n DEFAULT_LIB = \"lib\"\n@@ -25,7 +26,7 @@\n self.libs = [] # The libs to link against\n self.defines = [] # preprocessor definitions\n self.cflags = [] # pure C flags\n- self.cppflags = [] # C++ compilation flags\n+ self.cxxflags = [] # C++ compilation flags\n self.sharedlinkflags = [] # linker flags\n self.exelinkflags = [] # linker flags\n self.rootpath = \"\"\n@@ -85,6 +86,17 @@\n self._res_paths = self._filter_paths(self.resdirs)\n return self._res_paths\n \n+ # Compatibility for 'cppflags' (old style property to allow decoration)\n+ @deprecation.deprecated(deprecated_in=\"1.13\", removed_in=\"2.0\", details=\"Use 'cxxflags' instead\")\n+ def get_cppflags(self):\n+ return self.cxxflags\n+\n+ @deprecation.deprecated(deprecated_in=\"1.13\", removed_in=\"2.0\", details=\"Use 'cxxflags' instead\")\n+ def set_cppflags(self, value):\n+ self.cxxflags = value\n+\n+ cppflags = property(get_cppflags, set_cppflags)\n+\n \n class CppInfo(_CppInfo):\n \"\"\" Build Information declared to be used by the CONSUMERS of a\n@@ -140,7 +152,7 @@\n \n # Note these are in reverse order\n self.defines = merge_lists(dep_cpp_info.defines, self.defines)\n- self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)\n+ self.cxxflags = merge_lists(dep_cpp_info.cxxflags, self.cxxflags)\n self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)\n self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)\n self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)\n", "issue": "Deprecation message for `cppinfo.cppflags`\nIntroduce a new alias of `cppflags` named `cxxflags` and change the docs. 
If possible, show a deprecation message when cppflags is used.\r\nThe choice of `cppflags` meaning `c++flags` was not good, because `cppflags` means preprocessor flags and it is confusing.\n", "before_files": [{"content": "import os\nfrom collections import OrderedDict\n\n\nDEFAULT_INCLUDE = \"include\"\nDEFAULT_LIB = \"lib\"\nDEFAULT_BIN = \"bin\"\nDEFAULT_RES = \"res\"\nDEFAULT_SHARE = \"share\"\n\n\nclass _CppInfo(object):\n \"\"\" Object that stores all the necessary information to build in C/C++.\n It is intended to be system independent, translation to\n specific systems will be produced from this info\n \"\"\"\n def __init__(self):\n self.includedirs = [] # Ordered list of include paths\n self.srcdirs = [] # Ordered list of source paths\n self.libdirs = [] # Directories to find libraries\n self.resdirs = [] # Directories to find resources, data, etc\n self.bindirs = [] # Directories to find executables and shared libs\n self.builddirs = []\n self.rootpaths = []\n self.libs = [] # The libs to link against\n self.defines = [] # preprocessor definitions\n self.cflags = [] # pure C flags\n self.cppflags = [] # C++ compilation flags\n self.sharedlinkflags = [] # linker flags\n self.exelinkflags = [] # linker flags\n self.rootpath = \"\"\n self.sysroot = \"\"\n self._include_paths = None\n self._lib_paths = None\n self._bin_paths = None\n self._build_paths = None\n self._res_paths = None\n self._src_paths = None\n self.version = None # Version of the conan package\n self.description = None # Description of the conan package\n # When package is editable, filter_empty=False, so empty dirs are maintained\n self.filter_empty = True\n\n def _filter_paths(self, paths):\n abs_paths = [os.path.join(self.rootpath, p)\n if not os.path.isabs(p) else p for p in paths]\n if self.filter_empty:\n return [p for p in abs_paths if os.path.isdir(p)]\n else:\n return abs_paths\n\n @property\n def include_paths(self):\n if self._include_paths is None:\n self._include_paths = self._filter_paths(self.includedirs)\n return self._include_paths\n\n @property\n def lib_paths(self):\n if self._lib_paths is None:\n self._lib_paths = self._filter_paths(self.libdirs)\n return self._lib_paths\n\n @property\n def src_paths(self):\n if self._src_paths is None:\n self._src_paths = self._filter_paths(self.srcdirs)\n return self._src_paths\n\n @property\n def bin_paths(self):\n if self._bin_paths is None:\n self._bin_paths = self._filter_paths(self.bindirs)\n return self._bin_paths\n\n @property\n def build_paths(self):\n if self._build_paths is None:\n self._build_paths = self._filter_paths(self.builddirs)\n return self._build_paths\n\n @property\n def res_paths(self):\n if self._res_paths is None:\n self._res_paths = self._filter_paths(self.resdirs)\n return self._res_paths\n\n\nclass CppInfo(_CppInfo):\n \"\"\" Build Information declared to be used by the CONSUMERS of a\n conans. 
That means that consumers must use this flags and configs i order\n to build properly.\n Defined in user CONANFILE, directories are relative at user definition time\n \"\"\"\n def __init__(self, root_folder):\n super(CppInfo, self).__init__()\n self.rootpath = root_folder # the full path of the package in which the conans is found\n self.includedirs.append(DEFAULT_INCLUDE)\n self.libdirs.append(DEFAULT_LIB)\n self.bindirs.append(DEFAULT_BIN)\n self.resdirs.append(DEFAULT_RES)\n self.builddirs.append(\"\")\n # public_deps is needed to accumulate list of deps for cmake targets\n self.public_deps = []\n self.configs = {}\n\n def __getattr__(self, config):\n\n def _get_cpp_info():\n result = _CppInfo()\n result.rootpath = self.rootpath\n result.sysroot = self.sysroot\n result.includedirs.append(DEFAULT_INCLUDE)\n result.libdirs.append(DEFAULT_LIB)\n result.bindirs.append(DEFAULT_BIN)\n result.resdirs.append(DEFAULT_RES)\n result.builddirs.append(\"\")\n return result\n\n return self.configs.setdefault(config, _get_cpp_info())\n\n\nclass _BaseDepsCppInfo(_CppInfo):\n def __init__(self):\n super(_BaseDepsCppInfo, self).__init__()\n\n def update(self, dep_cpp_info):\n\n def merge_lists(seq1, seq2):\n return [s for s in seq1 if s not in seq2] + seq2\n\n self.includedirs = merge_lists(self.includedirs, dep_cpp_info.include_paths)\n self.srcdirs = merge_lists(self.srcdirs, dep_cpp_info.src_paths)\n self.libdirs = merge_lists(self.libdirs, dep_cpp_info.lib_paths)\n self.bindirs = merge_lists(self.bindirs, dep_cpp_info.bin_paths)\n self.resdirs = merge_lists(self.resdirs, dep_cpp_info.res_paths)\n self.builddirs = merge_lists(self.builddirs, dep_cpp_info.build_paths)\n self.libs = merge_lists(self.libs, dep_cpp_info.libs)\n self.rootpaths.append(dep_cpp_info.rootpath)\n\n # Note these are in reverse order\n self.defines = merge_lists(dep_cpp_info.defines, self.defines)\n self.cppflags = merge_lists(dep_cpp_info.cppflags, self.cppflags)\n self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)\n self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)\n self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)\n\n if not self.sysroot:\n self.sysroot = dep_cpp_info.sysroot\n\n @property\n def include_paths(self):\n return self.includedirs\n\n @property\n def lib_paths(self):\n return self.libdirs\n\n @property\n def src_paths(self):\n return self.srcdirs\n\n @property\n def bin_paths(self):\n return self.bindirs\n\n @property\n def build_paths(self):\n return self.builddirs\n\n @property\n def res_paths(self):\n return self.resdirs\n\n\nclass DepsCppInfo(_BaseDepsCppInfo):\n \"\"\" Build Information necessary to build a given conans. It contains the\n flags, directories and options if its dependencies. 
The conans CONANFILE\n should use these flags to pass them to the underlaying build system (Cmake, make),\n so deps info is managed\n \"\"\"\n\n def __init__(self):\n super(DepsCppInfo, self).__init__()\n self._dependencies = OrderedDict()\n self.configs = {}\n\n def __getattr__(self, config):\n return self.configs.setdefault(config, _BaseDepsCppInfo())\n\n @property\n def dependencies(self):\n return self._dependencies.items()\n\n @property\n def deps(self):\n return self._dependencies.keys()\n\n def __getitem__(self, item):\n return self._dependencies[item]\n\n def update(self, dep_cpp_info, pkg_name):\n assert isinstance(dep_cpp_info, CppInfo)\n self._dependencies[pkg_name] = dep_cpp_info\n super(DepsCppInfo, self).update(dep_cpp_info)\n for config, cpp_info in dep_cpp_info.configs.items():\n self.configs.setdefault(config, _BaseDepsCppInfo()).update(cpp_info)\n\n def update_deps_cpp_info(self, dep_cpp_info):\n assert isinstance(dep_cpp_info, DepsCppInfo)\n for pkg_name, cpp_info in dep_cpp_info.dependencies:\n self.update(cpp_info, pkg_name)\n", "path": "conans/model/build_info.py"}]} | 2,869 | 512 |
gh_patches_debug_14629 | rasdani/github-patches | git_diff | biolab__orange3-2608 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Names of models are not saved as settings
1. Put logistic regression on the canvas.
2. Edit its name
3. Save the schema
4. Close canvas
5. Open Orange and load the schema
The logistic regression name is reverted back to Logistic regression.
--------
The base class for learners sets `self.learner_name = self.name` in `__init__`, where `self.name` is the widget's class attribute `name`, which is set later in the derived classes. So `learner_name = Setting(name)` or `learner_name = Setting(OWWidget.name)` won't work since the name is only defined later. Having `learner_name = Setting("")` would require some magic.
`OWBaseLearner` already has a meta class. This may be the point where the setting can be properly initialized since the `name` is already set then.
</issue>
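To make the sentinel idea from the issue concrete before the widget source below, here is a small, purely illustrative sketch of the lazy-default pattern. It deliberately avoids the Qt and Orange settings machinery; in the real widget the persisted attribute would be declared as a schema-only `Setting` (e.g. `learner_name = Setting(None, schema_only=True)`) and the restored value would come from the saved workflow rather than a constructor argument.

```python
class LearnerWidgetSketch:
    """Plain-Python stand-in for the naming logic only (no Qt, no Orange machinery)."""

    name = "Example Learner"   # class attribute, defined by each concrete widget

    def __init__(self, restored_name=None):
        # restored_name plays the role of the value loaded from the saved schema.
        self.learner_name = restored_name
        if self.learner_name is None:        # nothing restored from the schema
            self.learner_name = self.name    # fall back to the widget's own name


# Usage: a fresh widget gets the default, a restored one keeps the edited name.
assert LearnerWidgetSketch().learner_name == "Example Learner"
assert LearnerWidgetSketch("My LR").learner_name == "My LR"
```

Because the class-level `name` is known by the time instances are created, deferring the fallback to `__init__` sidesteps the ordering problem described above.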
<code>
[start of Orange/widgets/utils/owlearnerwidget.py]
1 from copy import deepcopy
2
3 import itertools
4 import numpy as np
5
6 from AnyQt.QtCore import QTimer, Qt
7
8 from Orange.data import Table
9 from Orange.modelling import Fitter, Learner, Model
10 from Orange.preprocess.preprocess import Preprocess
11 from Orange.widgets import gui
12 from Orange.widgets.settings import Setting
13 from Orange.widgets.utils import getmembers
14 from Orange.widgets.utils.signals import Output, Input
15 from Orange.widgets.utils.sql import check_sql_input
16 from Orange.widgets.widget import OWWidget, WidgetMetaClass, Msg
17
18
19 class OWBaseLearnerMeta(WidgetMetaClass):
20 """ Meta class for learner widgets
21
22 OWBaseLearner declares two outputs, learner and model with
23 generic type (Learner and Model).
24
25 This metaclass ensures that each of the subclasses gets
26 its own Outputs class with output that match the corresponding
27 learner.
28 """
29 def __new__(cls, name, bases, attributes):
30 def abstract_widget():
31 return not attributes.get("name")
32
33 def copy_outputs(template):
34 result = type("Outputs", (), {})
35 for name, signal in getmembers(template, Output):
36 setattr(result, name, deepcopy(signal))
37 return result
38
39 obj = super().__new__(cls, name, bases, attributes)
40 if abstract_widget():
41 return obj
42
43 learner = attributes.get("LEARNER")
44 if not learner:
45 raise AttributeError(
46 "'{}' must declare attribute LEARNER".format(name))
47
48 outputs = obj.Outputs = copy_outputs(obj.Outputs)
49 outputs.learner.type = learner
50 outputs.model.type = learner.__returns__
51
52 return obj
53
54
55 class OWBaseLearner(OWWidget, metaclass=OWBaseLearnerMeta):
56 """Abstract widget for classification/regression learners.
57
58 Notes
59 -----
60 All learner widgets should define learner class LEARNER.
61 LEARNER should have __returns__ attribute.
62
63 Overwrite `create_learner`, `add_main_layout` and `get_learner_parameters`
64 in case LEARNER has extra parameters.
65
66 """
67 LEARNER = None
68 supports_sparse = True
69
70 want_main_area = False
71 resizing_enabled = False
72 auto_apply = Setting(True)
73
74 class Error(OWWidget.Error):
75 data_error = Msg("{}")
76 fitting_failed = Msg("Fitting failed.\n{}")
77 sparse_not_supported = Msg("Sparse data is not supported.")
78 out_of_memory = Msg("Out of memory.")
79
80 class Warning(OWWidget.Warning):
81 outdated_learner = Msg("Press Apply to submit changes.")
82
83 class Inputs:
84 data = Input("Data", Table)
85 preprocessor = Input("Preprocessor", Preprocess)
86
87 class Outputs:
88 learner = Output("Learner", Learner, dynamic=False)
89 model = Output("Model", Model, dynamic=False,
90 replaces=["Classifier", "Predictor"])
91
92 OUTPUT_MODEL_NAME = Outputs.model.name # Attr for backcompat w/ self.send() code
93
94 def __init__(self):
95 super().__init__()
96 self.data = None
97 self.valid_data = False
98 self.learner = None
99 self.learner_name = self.name
100 self.model = None
101 self.preprocessors = None
102 self.outdated_settings = False
103
104 self.setup_layout()
105 QTimer.singleShot(0, getattr(self, "unconditional_apply", self.apply))
106
107 def create_learner(self):
108 """Creates a learner with current configuration.
109
110 Returns:
111 Learner: an instance of Orange.base.learner subclass.
112 """
113 return self.LEARNER(preprocessors=self.preprocessors)
114
115 def get_learner_parameters(self):
116 """Creates an `OrderedDict` or a sequence of pairs with current model
117 configuration.
118
119 Returns:
120 OrderedDict or List: (option, value) pairs or dict
121 """
122 return []
123
124 @Inputs.preprocessor
125 def set_preprocessor(self, preprocessor):
126 self.preprocessors = preprocessor
127 self.apply()
128
129 @Inputs.data
130 @check_sql_input
131 def set_data(self, data):
132 """Set the input train data set."""
133 self.Error.data_error.clear()
134 self.data = data
135 if data is not None and data.domain.class_var is None:
136 self.Error.data_error("Data has no target variable.")
137 self.data = None
138
139 self.update_model()
140
141 def apply(self):
142 """Applies learner and sends new model."""
143 self.update_learner()
144 self.update_model()
145
146 def update_learner(self):
147 self.learner = self.create_learner()
148 if self.learner and issubclass(self.LEARNER, Fitter):
149 self.learner.use_default_preprocessors = True
150 if self.learner is not None:
151 self.learner.name = self.learner_name
152 self.Outputs.learner.send(self.learner)
153 self.outdated_settings = False
154 self.Warning.outdated_learner.clear()
155
156 def show_fitting_failed(self, exc):
157 """Show error when fitting fails.
158 Derived widgets can override this to show more specific messages."""
159 self.Error.fitting_failed(str(exc), shown=exc is not None)
160
161 def update_model(self):
162 self.show_fitting_failed(None)
163 self.model = None
164 if self.check_data():
165 try:
166 self.model = self.learner(self.data)
167 except BaseException as exc:
168 self.show_fitting_failed(exc)
169 else:
170 self.model.name = self.learner_name
171 self.model.instances = self.data
172 self.Outputs.model.send(self.model)
173
174 def check_data(self):
175 self.valid_data = False
176 self.Error.sparse_not_supported.clear()
177 if self.data is not None and self.learner is not None:
178 self.Error.data_error.clear()
179 if not self.learner.check_learner_adequacy(self.data.domain):
180 self.Error.data_error(self.learner.learner_adequacy_err_msg)
181 elif not len(self.data):
182 self.Error.data_error("Data set is empty.")
183 elif len(np.unique(self.data.Y)) < 2:
184 self.Error.data_error("Data contains a single target value.")
185 elif self.data.X.size == 0:
186 self.Error.data_error("Data has no features to learn from.")
187 elif self.data.is_sparse() and not self.supports_sparse:
188 self.Error.sparse_not_supported()
189 else:
190 self.valid_data = True
191 return self.valid_data
192
193 def settings_changed(self, *args, **kwargs):
194 self.outdated_settings = True
195 self.Warning.outdated_learner(shown=not self.auto_apply)
196 self.apply()
197
198 def _change_name(self, instance, output):
199 if instance:
200 instance.name = self.learner_name
201 if self.auto_apply:
202 output.send(instance)
203
204 def learner_name_changed(self):
205 self._change_name(self.learner, self.Outputs.learner)
206 self._change_name(self.model, self.Outputs.model)
207
208 def send_report(self):
209 self.report_items((("Name", self.learner_name),))
210
211 model_parameters = self.get_learner_parameters()
212 if model_parameters:
213 self.report_items("Model parameters", model_parameters)
214
215 if self.data:
216 self.report_data("Data", self.data)
217
218 # GUI
219 def setup_layout(self):
220 self.add_learner_name_widget()
221 self.add_main_layout()
222 # Options specific to target variable type, if supported
223 if issubclass(self.LEARNER, Fitter):
224 # Only add a classification section if the method is overridden
225 if type(self).add_classification_layout is not \
226 OWBaseLearner.add_classification_layout:
227 classification_box = gui.widgetBox(
228 self.controlArea, 'Classification')
229 self.add_classification_layout(classification_box)
230 # Only add a regression section if the method is overridden
231 if type(self).add_regression_layout is not \
232 OWBaseLearner.add_regression_layout:
233 regression_box = gui.widgetBox(self.controlArea, 'Regression')
234 self.add_regression_layout(regression_box)
235 self.add_bottom_buttons()
236
237 def add_main_layout(self):
238 """Creates layout with the learner configuration widgets.
239
240 Override this method for laying out any learner-specific parameter controls.
241 See setup_layout() method for execution order.
242 """
243 pass
244
245 def add_classification_layout(self, box):
246 """Creates layout for classification specific options.
247
248 If a widget outputs a learner dispatcher, sometimes the classification
249 and regression learners require different options.
250 See `setup_layout()` method for execution order.
251 """
252 pass
253
254 def add_regression_layout(self, box):
255 """Creates layout for regression specific options.
256
257 If a widget outputs a learner dispatcher, sometimes the classification
258 and regression learners require different options.
259 See `setup_layout()` method for execution order.
260 """
261 pass
262
263 def add_learner_name_widget(self):
264 self.name_line_edit = gui.lineEdit(
265 self.controlArea, self, 'learner_name', box='Name',
266 tooltip='The name will identify this model in other widgets',
267 orientation=Qt.Horizontal, callback=self.learner_name_changed)
268
269 def add_bottom_buttons(self):
270 box = gui.hBox(self.controlArea, True)
271 box.layout().addWidget(self.report_button)
272 gui.separator(box, 15)
273 self.apply_button = gui.auto_commit(box, self, 'auto_apply', '&Apply',
274 box=False, commit=self.apply)
275
276 def send(self, signalName, value, id=None):
277 # A subclass might still use the old syntax to send outputs
278 # defined on this class
279 for _, output in getmembers(self.Outputs, Output):
280 if output.name == signalName or signalName in output.replaces:
281 output.send(value, id=id)
282 return
283
284 super().send(signalName, value, id)
285
286 @classmethod
287 def get_widget_description(cls):
288 # When a subclass defines defines old-style signals, those override
289 # the new-style ones, so we add them manually
290 desc = super().get_widget_description()
291
292 if cls.outputs:
293 desc["outputs"].extend(cls.get_signals("outputs", True))
294 if cls.inputs:
295 desc["inputs"].extend(cls.get_signals("inputs", True))
296 return desc
297
[end of Orange/widgets/utils/owlearnerwidget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Orange/widgets/utils/owlearnerwidget.py b/Orange/widgets/utils/owlearnerwidget.py
--- a/Orange/widgets/utils/owlearnerwidget.py
+++ b/Orange/widgets/utils/owlearnerwidget.py
@@ -67,6 +67,7 @@
LEARNER = None
supports_sparse = True
+ learner_name = Setting(None, schema_only=True)
want_main_area = False
resizing_enabled = False
auto_apply = Setting(True)
@@ -96,7 +97,8 @@
self.data = None
self.valid_data = False
self.learner = None
- self.learner_name = self.name
+ if self.learner_name is None:
+ self.learner_name = self.name
self.model = None
self.preprocessors = None
self.outdated_settings = False
| {"golden_diff": "diff --git a/Orange/widgets/utils/owlearnerwidget.py b/Orange/widgets/utils/owlearnerwidget.py\n--- a/Orange/widgets/utils/owlearnerwidget.py\n+++ b/Orange/widgets/utils/owlearnerwidget.py\n@@ -67,6 +67,7 @@\n LEARNER = None\n supports_sparse = True\n \n+ learner_name = Setting(None, schema_only=True)\n want_main_area = False\n resizing_enabled = False\n auto_apply = Setting(True)\n@@ -96,7 +97,8 @@\n self.data = None\n self.valid_data = False\n self.learner = None\n- self.learner_name = self.name\n+ if self.learner_name is None:\n+ self.learner_name = self.name\n self.model = None\n self.preprocessors = None\n self.outdated_settings = False\n", "issue": "Names of models are not saved as settings\n1. Put logistic regression on the canvas.\r\n2. Edit its name\r\n3. Save the schema\r\n4. Close canvas\r\n5. Open Orange and load the schema\r\n\r\nThe logistic regression name is reverted back to Logistic regression.\r\n\r\n--------\r\n\r\nThe base class for learners sets `self.learner_name = self.name` in `__init__`, where `self.name` is the widgets class attribute `name`, which is set later in the derived classes. So `learner_name = Setting(name)` or `learner_name = Setting(OWWidget.name)` won't work since the name is only defined later. Having `learner_name = Setting(\"\")` would require some magic.\r\n\r\n`OWBaseLearner` already has a meta class. This may be the point where the setting can be properly initialized since the `name` is already set then.\n", "before_files": [{"content": "from copy import deepcopy\n\nimport itertools\nimport numpy as np\n\nfrom AnyQt.QtCore import QTimer, Qt\n\nfrom Orange.data import Table\nfrom Orange.modelling import Fitter, Learner, Model\nfrom Orange.preprocess.preprocess import Preprocess\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting\nfrom Orange.widgets.utils import getmembers\nfrom Orange.widgets.utils.signals import Output, Input\nfrom Orange.widgets.utils.sql import check_sql_input\nfrom Orange.widgets.widget import OWWidget, WidgetMetaClass, Msg\n\n\nclass OWBaseLearnerMeta(WidgetMetaClass):\n \"\"\" Meta class for learner widgets\n\n OWBaseLearner declares two outputs, learner and model with\n generic type (Learner and Model).\n\n This metaclass ensures that each of the subclasses gets\n its own Outputs class with output that match the corresponding\n learner.\n \"\"\"\n def __new__(cls, name, bases, attributes):\n def abstract_widget():\n return not attributes.get(\"name\")\n\n def copy_outputs(template):\n result = type(\"Outputs\", (), {})\n for name, signal in getmembers(template, Output):\n setattr(result, name, deepcopy(signal))\n return result\n\n obj = super().__new__(cls, name, bases, attributes)\n if abstract_widget():\n return obj\n\n learner = attributes.get(\"LEARNER\")\n if not learner:\n raise AttributeError(\n \"'{}' must declare attribute LEARNER\".format(name))\n\n outputs = obj.Outputs = copy_outputs(obj.Outputs)\n outputs.learner.type = learner\n outputs.model.type = learner.__returns__\n\n return obj\n\n\nclass OWBaseLearner(OWWidget, metaclass=OWBaseLearnerMeta):\n \"\"\"Abstract widget for classification/regression learners.\n\n Notes\n -----\n All learner widgets should define learner class LEARNER.\n LEARNER should have __returns__ attribute.\n\n Overwrite `create_learner`, `add_main_layout` and `get_learner_parameters`\n in case LEARNER has extra parameters.\n\n \"\"\"\n LEARNER = None\n supports_sparse = True\n\n want_main_area = False\n resizing_enabled = False\n auto_apply 
= Setting(True)\n\n class Error(OWWidget.Error):\n data_error = Msg(\"{}\")\n fitting_failed = Msg(\"Fitting failed.\\n{}\")\n sparse_not_supported = Msg(\"Sparse data is not supported.\")\n out_of_memory = Msg(\"Out of memory.\")\n\n class Warning(OWWidget.Warning):\n outdated_learner = Msg(\"Press Apply to submit changes.\")\n\n class Inputs:\n data = Input(\"Data\", Table)\n preprocessor = Input(\"Preprocessor\", Preprocess)\n\n class Outputs:\n learner = Output(\"Learner\", Learner, dynamic=False)\n model = Output(\"Model\", Model, dynamic=False,\n replaces=[\"Classifier\", \"Predictor\"])\n\n OUTPUT_MODEL_NAME = Outputs.model.name # Attr for backcompat w/ self.send() code\n\n def __init__(self):\n super().__init__()\n self.data = None\n self.valid_data = False\n self.learner = None\n self.learner_name = self.name\n self.model = None\n self.preprocessors = None\n self.outdated_settings = False\n\n self.setup_layout()\n QTimer.singleShot(0, getattr(self, \"unconditional_apply\", self.apply))\n\n def create_learner(self):\n \"\"\"Creates a learner with current configuration.\n\n Returns:\n Learner: an instance of Orange.base.learner subclass.\n \"\"\"\n return self.LEARNER(preprocessors=self.preprocessors)\n\n def get_learner_parameters(self):\n \"\"\"Creates an `OrderedDict` or a sequence of pairs with current model\n configuration.\n\n Returns:\n OrderedDict or List: (option, value) pairs or dict\n \"\"\"\n return []\n\n @Inputs.preprocessor\n def set_preprocessor(self, preprocessor):\n self.preprocessors = preprocessor\n self.apply()\n\n @Inputs.data\n @check_sql_input\n def set_data(self, data):\n \"\"\"Set the input train data set.\"\"\"\n self.Error.data_error.clear()\n self.data = data\n if data is not None and data.domain.class_var is None:\n self.Error.data_error(\"Data has no target variable.\")\n self.data = None\n\n self.update_model()\n\n def apply(self):\n \"\"\"Applies learner and sends new model.\"\"\"\n self.update_learner()\n self.update_model()\n\n def update_learner(self):\n self.learner = self.create_learner()\n if self.learner and issubclass(self.LEARNER, Fitter):\n self.learner.use_default_preprocessors = True\n if self.learner is not None:\n self.learner.name = self.learner_name\n self.Outputs.learner.send(self.learner)\n self.outdated_settings = False\n self.Warning.outdated_learner.clear()\n\n def show_fitting_failed(self, exc):\n \"\"\"Show error when fitting fails.\n Derived widgets can override this to show more specific messages.\"\"\"\n self.Error.fitting_failed(str(exc), shown=exc is not None)\n\n def update_model(self):\n self.show_fitting_failed(None)\n self.model = None\n if self.check_data():\n try:\n self.model = self.learner(self.data)\n except BaseException as exc:\n self.show_fitting_failed(exc)\n else:\n self.model.name = self.learner_name\n self.model.instances = self.data\n self.Outputs.model.send(self.model)\n\n def check_data(self):\n self.valid_data = False\n self.Error.sparse_not_supported.clear()\n if self.data is not None and self.learner is not None:\n self.Error.data_error.clear()\n if not self.learner.check_learner_adequacy(self.data.domain):\n self.Error.data_error(self.learner.learner_adequacy_err_msg)\n elif not len(self.data):\n self.Error.data_error(\"Data set is empty.\")\n elif len(np.unique(self.data.Y)) < 2:\n self.Error.data_error(\"Data contains a single target value.\")\n elif self.data.X.size == 0:\n self.Error.data_error(\"Data has no features to learn from.\")\n elif self.data.is_sparse() and not self.supports_sparse:\n 
self.Error.sparse_not_supported()\n else:\n self.valid_data = True\n return self.valid_data\n\n def settings_changed(self, *args, **kwargs):\n self.outdated_settings = True\n self.Warning.outdated_learner(shown=not self.auto_apply)\n self.apply()\n\n def _change_name(self, instance, output):\n if instance:\n instance.name = self.learner_name\n if self.auto_apply:\n output.send(instance)\n\n def learner_name_changed(self):\n self._change_name(self.learner, self.Outputs.learner)\n self._change_name(self.model, self.Outputs.model)\n\n def send_report(self):\n self.report_items(((\"Name\", self.learner_name),))\n\n model_parameters = self.get_learner_parameters()\n if model_parameters:\n self.report_items(\"Model parameters\", model_parameters)\n\n if self.data:\n self.report_data(\"Data\", self.data)\n\n # GUI\n def setup_layout(self):\n self.add_learner_name_widget()\n self.add_main_layout()\n # Options specific to target variable type, if supported\n if issubclass(self.LEARNER, Fitter):\n # Only add a classification section if the method is overridden\n if type(self).add_classification_layout is not \\\n OWBaseLearner.add_classification_layout:\n classification_box = gui.widgetBox(\n self.controlArea, 'Classification')\n self.add_classification_layout(classification_box)\n # Only add a regression section if the method is overridden\n if type(self).add_regression_layout is not \\\n OWBaseLearner.add_regression_layout:\n regression_box = gui.widgetBox(self.controlArea, 'Regression')\n self.add_regression_layout(regression_box)\n self.add_bottom_buttons()\n\n def add_main_layout(self):\n \"\"\"Creates layout with the learner configuration widgets.\n\n Override this method for laying out any learner-specific parameter controls.\n See setup_layout() method for execution order.\n \"\"\"\n pass\n\n def add_classification_layout(self, box):\n \"\"\"Creates layout for classification specific options.\n\n If a widget outputs a learner dispatcher, sometimes the classification\n and regression learners require different options.\n See `setup_layout()` method for execution order.\n \"\"\"\n pass\n\n def add_regression_layout(self, box):\n \"\"\"Creates layout for regression specific options.\n\n If a widget outputs a learner dispatcher, sometimes the classification\n and regression learners require different options.\n See `setup_layout()` method for execution order.\n \"\"\"\n pass\n\n def add_learner_name_widget(self):\n self.name_line_edit = gui.lineEdit(\n self.controlArea, self, 'learner_name', box='Name',\n tooltip='The name will identify this model in other widgets',\n orientation=Qt.Horizontal, callback=self.learner_name_changed)\n\n def add_bottom_buttons(self):\n box = gui.hBox(self.controlArea, True)\n box.layout().addWidget(self.report_button)\n gui.separator(box, 15)\n self.apply_button = gui.auto_commit(box, self, 'auto_apply', '&Apply',\n box=False, commit=self.apply)\n\n def send(self, signalName, value, id=None):\n # A subclass might still use the old syntax to send outputs\n # defined on this class\n for _, output in getmembers(self.Outputs, Output):\n if output.name == signalName or signalName in output.replaces:\n output.send(value, id=id)\n return\n\n super().send(signalName, value, id)\n\n @classmethod\n def get_widget_description(cls):\n # When a subclass defines defines old-style signals, those override\n # the new-style ones, so we add them manually\n desc = super().get_widget_description()\n\n if cls.outputs:\n desc[\"outputs\"].extend(cls.get_signals(\"outputs\", True))\n if 
cls.inputs:\n desc[\"inputs\"].extend(cls.get_signals(\"inputs\", True))\n return desc\n", "path": "Orange/widgets/utils/owlearnerwidget.py"}]} | 3,686 | 188 |
gh_patches_debug_37684 | rasdani/github-patches | git_diff | pulp__pulpcore-4335 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Need a way to directly migrate pulp_labels from 3.21 to 3.28
**Is your feature request related to a problem? Please describe.**
As RHUI developers, we are heavy users of pulp_labels on repository objects. As the current 4.5 release is on pulpcore 3.21, we would like our next release (4.6) to be based on pulpcore 3.28, and we need to ensure a smooth upgrade path for our customers. As it is currently laid out, the 3.21 to 3.28 migration path requires an intermediate step (<3.25) where the `pulpcore-manager datarepair-labels` command is available.
**Describe the solution you'd like**
The ideal solution would be to make Django migrations take care of that automatically, without requiring an intermediate step/release. Since we live in a non-ideal world, a solution where an additional command like `datarepair-labels` is available would also suffice.
**Describe alternatives you've considered**
We have considered an intermediate release of RHUI 4.6 with pulpcore 3.22 followed by RHUI 4.7 built on 3.28, but it would create a lot of migration issues for our customers, with many of them still running RHUI 4.0.
</issue>
<code>
[start of pulpcore/app/migrations/0104_delete_label.py]
1 # Generated by Django 3.2.18 on 2023-05-04 07:06
2
3 from django.db import migrations
4
5
6 def check_no_existing_labels(apps, schema_editor):
7 Label = apps.get_model("core", "Label")
8 if Label.objects.exists():
9 raise RuntimeError(
10 "There are remaining labels. Please revert to pulpcore<3.25 and make sure all labels are properly mirgated or deleted."
11 )
12
13
14 class Migration(migrations.Migration):
15
16 dependencies = [
17 ('core', '0103_alter_export_task'),
18 ]
19
20 operations = [
21 migrations.RunPython(
22 code=check_no_existing_labels,
23 reverse_code=migrations.RunPython.noop,
24 elidable=True,
25 ),
26 migrations.DeleteModel(
27 name='Label',
28 ),
29 ]
30
[end of pulpcore/app/migrations/0104_delete_label.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/migrations/0104_delete_label.py b/pulpcore/app/migrations/0104_delete_label.py
--- a/pulpcore/app/migrations/0104_delete_label.py
+++ b/pulpcore/app/migrations/0104_delete_label.py
@@ -1,6 +1,45 @@
# Generated by Django 3.2.18 on 2023-05-04 07:06
from django.db import migrations
+from django.db.models.expressions import OuterRef, RawSQL
+
+
+def migrate_remaining_labels(apps, schema_editor):
+ """
+ This data migration handles the "but what about plugins" problem noted in the issue [0], with only two caveats:
+
+ Case 1: If there were to exist a plugin containing a Model whose model-name ended in (for example) "Repository",
+ that was NOT a detail-model of a Repository master-model, AND that plugin allowed Labels for such a model - then,
+ upon running this migration, those Labels would be lost.
+
+ Case 2: If there were to exist a plugin containing a Model that was a Detail of (for example) Repository,
+ but named something like "PluginRepositoryButWhy", and that plugin allowed Labels,
+ and instances of such a Model had Labels associated with them - then this migration would fail,
+ because the Labels would not be found, migrated, and deleted, and the old-Label table would not be able to be dropped.
+
+ And the plugins described above would have to have existed and been in use with pulpcore/3.21,only -
+ if they appeared with core/3.22, they'd be using new-Labels and all would be (already) well.
+
+ No such plugins/Models exist, to the best of our knowledge.
+
+ [0] https://github.com/pulp/pulpcore/issues/4319
+ """
+ Label = apps.get_model("core", "Label")
+ Repository = apps.get_model("core", "Repository")
+ Remote = apps.get_model("core", "Remote")
+ Distribution = apps.get_model("core", "Distribution")
+ ContentType = apps.get_model("contenttypes", "ContentType")
+
+ for master_model, model_name in [(Repository, "repository"), (Remote, "remote"), (Distribution, "distribution")]:
+ detail_ctypes = ContentType.objects.filter(app_label__ne="core", model__endswith=model_name)
+ affected_ids = Label.objects.filter(content_type__in=detail_ctypes).values("object_id").distinct()
+ label_subq = Label.objects.filter(
+ content_type__in=detail_ctypes, object_id=OuterRef("pulp_id")
+ ).annotate(
+ label_data=RawSQL("hstore(array_agg(key), array_agg(value))", [])
+ ).values("label_data")
+ master_model.objects.filter(pulp_id__in=affected_ids).update(pulp_labels=label_subq)
+ Label.objects.filter(content_type__in=detail_ctypes).delete()
def check_no_existing_labels(apps, schema_editor):
@@ -18,11 +57,20 @@
]
operations = [
+ migrations.RunPython(
+ code=migrate_remaining_labels,
+ reverse_code=migrations.RunPython.noop,
+ elidable=True,
+ ),
migrations.RunPython(
code=check_no_existing_labels,
reverse_code=migrations.RunPython.noop,
elidable=True,
),
+ migrations.RunSQL(
+ sql="SET CONSTRAINTS ALL IMMEDIATE;",
+ reverse_sql="",
+ ),
migrations.DeleteModel(
name='Label',
),
| {"golden_diff": "diff --git a/pulpcore/app/migrations/0104_delete_label.py b/pulpcore/app/migrations/0104_delete_label.py\n--- a/pulpcore/app/migrations/0104_delete_label.py\n+++ b/pulpcore/app/migrations/0104_delete_label.py\n@@ -1,6 +1,45 @@\n # Generated by Django 3.2.18 on 2023-05-04 07:06\n \n from django.db import migrations\n+from django.db.models.expressions import OuterRef, RawSQL\n+\n+\n+def migrate_remaining_labels(apps, schema_editor):\n+ \"\"\"\n+ This data migration handles the \"but what about plugins\" problem noted in the issue [0], with only two caveats:\n+\n+ Case 1: If there were to exist a plugin containing a Model whose model-name ended in (for example) \"Repository\",\n+ that was NOT a detail-model of a Repository master-model, AND that plugin allowed Labels for such a model - then,\n+ upon running this migration, those Labels would be lost.\n+\n+ Case 2: If there were to exist a plugin containing a Model that was a Detail of (for example) Repository,\n+ but named something like \"PluginRepositoryButWhy\", and that plugin allowed Labels,\n+ and instances of such a Model had Labels associated with them - then this migration would fail,\n+ because the Labels would not be found, migrated, and deleted, and the old-Label table would not be able to be dropped.\n+\n+ And the plugins described above would have to have existed and been in use with pulpcore/3.21,only -\n+ if they appeared with core/3.22, they'd be using new-Labels and all would be (already) well.\n+\n+ No such plugins/Models exist, to the best of our knowledge.\n+\n+ [0] https://github.com/pulp/pulpcore/issues/4319\n+ \"\"\"\n+ Label = apps.get_model(\"core\", \"Label\")\n+ Repository = apps.get_model(\"core\", \"Repository\")\n+ Remote = apps.get_model(\"core\", \"Remote\")\n+ Distribution = apps.get_model(\"core\", \"Distribution\")\n+ ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n+\n+ for master_model, model_name in [(Repository, \"repository\"), (Remote, \"remote\"), (Distribution, \"distribution\")]:\n+ detail_ctypes = ContentType.objects.filter(app_label__ne=\"core\", model__endswith=model_name)\n+ affected_ids = Label.objects.filter(content_type__in=detail_ctypes).values(\"object_id\").distinct()\n+ label_subq = Label.objects.filter(\n+ content_type__in=detail_ctypes, object_id=OuterRef(\"pulp_id\")\n+ ).annotate(\n+ label_data=RawSQL(\"hstore(array_agg(key), array_agg(value))\", [])\n+ ).values(\"label_data\")\n+ master_model.objects.filter(pulp_id__in=affected_ids).update(pulp_labels=label_subq)\n+ Label.objects.filter(content_type__in=detail_ctypes).delete()\n \n \n def check_no_existing_labels(apps, schema_editor):\n@@ -18,11 +57,20 @@\n ]\n \n operations = [\n+ migrations.RunPython(\n+ code=migrate_remaining_labels,\n+ reverse_code=migrations.RunPython.noop,\n+ elidable=True,\n+ ),\n migrations.RunPython(\n code=check_no_existing_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n+ migrations.RunSQL(\n+ sql=\"SET CONSTRAINTS ALL IMMEDIATE;\",\n+ reverse_sql=\"\",\n+ ),\n migrations.DeleteModel(\n name='Label',\n ),\n", "issue": "Need a way to directly migrate pulp_labels from 3.21 to 3.28\n**Is your feature request related to a problem? Please describe.**\r\nAs RHUI developers, we are heavy users of pulp_labels on repository objects. As the current 4.5 release is on pulpcore 3.21, we would like our next release (4.6) to be based on pulpcore 3.28, and we need to ensure a smooth upgrade path for our customers. 
As it is currently laid out, the 3.21 to 3.28 migration path requires an intermediate step (<3.25) where `pulpcore-manager datarepair-labels` command is available.\r\n\r\n**Describe the solution you'd like**\r\nThe ideal solution would be to make Django migrations take care of that automatically, without requiring an intermediate step/release. Since we live in non-ideal world, a solution where an additional command like `datarepair-labels` is available would also suffice.\r\n\r\n**Describe alternatives you've considered**\r\nWe have considered an intermediate release of RHUI 4.6 with pulpcore 3.22 followed by RHUI 4.7 built on 3.28, but it would create a lot of migration issues for our customers, with many of them still running RHUI 4.0.\r\n\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.18 on 2023-05-04 07:06\n\nfrom django.db import migrations\n\n\ndef check_no_existing_labels(apps, schema_editor):\n Label = apps.get_model(\"core\", \"Label\")\n if Label.objects.exists():\n raise RuntimeError(\n \"There are remaining labels. Please revert to pulpcore<3.25 and make sure all labels are properly mirgated or deleted.\"\n )\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0103_alter_export_task'),\n ]\n\n operations = [\n migrations.RunPython(\n code=check_no_existing_labels,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n ),\n migrations.DeleteModel(\n name='Label',\n ),\n ]\n", "path": "pulpcore/app/migrations/0104_delete_label.py"}]} | 1,069 | 814 |
gh_patches_debug_11491 | rasdani/github-patches | git_diff | scikit-hep__pyhf-444 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyhf commandline tools requires uproot extra
# Description
Just issuing `pyhf --help` requires the uproot extra, since `commandline.py` imports `writexml`.
# Expected Behavior
I don't need uproot if I don't want to use json2xml or xml2json
# Actual Behavior
I can't use `pyhf` without installing uproot.
# Steps to Reproduce
install master and run pyhf
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
</issue>
<code>
[start of pyhf/commandline.py]
1 import logging
2
3 import click
4 import json
5 import os
6
7 from . import writexml
8 from .utils import hypotest
9 from .pdf import Workspace
10 from .version import __version__
11
12 logging.basicConfig()
13 log = logging.getLogger(__name__)
14
15 # This is only needed for Python 2/3 compatibility
16 def ensure_dirs(path):
17 try:
18 os.makedirs(path, exist_ok=True)
19 except TypeError:
20 if not os.path.exists(path):
21 os.makedirs(path)
22
23
24 @click.group(context_settings=dict(help_option_names=['-h', '--help']))
25 @click.version_option(version=__version__)
26 def pyhf():
27 pass
28
29
30 @pyhf.command()
31 @click.argument('entrypoint-xml', type=click.Path(exists=True))
32 @click.option(
33 '--basedir',
34 help='The base directory for the XML files to point relative to.',
35 type=click.Path(exists=True),
36 default=os.getcwd(),
37 )
38 @click.option(
39 '--output-file',
40 help='The location of the output json file. If not specified, prints to screen.',
41 default=None,
42 )
43 @click.option('--track-progress/--hide-progress', default=True)
44 def xml2json(entrypoint_xml, basedir, output_file, track_progress):
45 """ Entrypoint XML: The top-level XML file for the PDF definition. """
46 try:
47 import uproot
48
49 assert uproot
50 except ImportError:
51 log.error(
52 "xml2json requires uproot, please install pyhf using the "
53 "xmlio extra: pip install pyhf[xmlio] or install uproot "
54 "manually: pip install uproot"
55 )
56 from . import readxml
57
58 spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)
59 if output_file is None:
60 print(json.dumps(spec, indent=4, sort_keys=True))
61 else:
62 with open(output_file, 'w+') as out_file:
63 json.dump(spec, out_file, indent=4, sort_keys=True)
64 log.debug("Written to {0:s}".format(output_file))
65
66
67 @pyhf.command()
68 @click.argument('workspace', default='-')
69 @click.option('--output-dir', type=click.Path(exists=True), default='.')
70 @click.option('--specroot', default='config')
71 @click.option('--dataroot', default='data')
72 @click.option('--resultprefix', default='FitConfig')
73 def json2xml(workspace, output_dir, specroot, dataroot, resultprefix):
74 try:
75 import uproot
76
77 assert uproot
78 except ImportError:
79 log.error(
80 "json2xml requires uproot, please install pyhf using the "
81 "xmlio extra: pip install pyhf[xmlio] or install uproot "
82 "manually: pip install uproot"
83 )
84
85 ensure_dirs(output_dir)
86 with click.open_file(workspace, 'r') as specstream:
87 d = json.load(specstream)
88 ensure_dirs(os.path.join(output_dir, specroot))
89 ensure_dirs(os.path.join(output_dir, dataroot))
90 with click.open_file(
91 os.path.join(output_dir, '{0:s}.xml'.format(resultprefix)), 'w'
92 ) as outstream:
93 outstream.write(
94 writexml.writexml(
95 d,
96 os.path.join(output_dir, specroot),
97 os.path.join(output_dir, dataroot),
98 resultprefix,
99 ).decode('utf-8')
100 )
101
102
103 @pyhf.command()
104 @click.argument('workspace', default='-')
105 @click.option(
106 '--output-file',
107 help='The location of the output json file. If not specified, prints to screen.',
108 default=None,
109 )
110 @click.option('--measurement', default=None)
111 @click.option('-p', '--patch', multiple=True)
112 @click.option('--testpoi', default=1.0)
113 def cls(workspace, output_file, measurement, patch, testpoi):
114 with click.open_file(workspace, 'r') as specstream:
115 wspec = json.load(specstream)
116
117 w = Workspace(wspec)
118
119 patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]
120 p = w.model(measurement_name=measurement, patches=patches)
121 result = hypotest(testpoi, w.data(p), p, return_expected_set=True)
122 result = {'CLs_obs': result[0].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}
123 if output_file is None:
124 print(json.dumps(result, indent=4, sort_keys=True))
125 else:
126 with open(output_file, 'w+') as out_file:
127 json.dump(result, out_file, indent=4, sort_keys=True)
128 log.debug("Written to {0:s}".format(output_file))
129
[end of pyhf/commandline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyhf/commandline.py b/pyhf/commandline.py
--- a/pyhf/commandline.py
+++ b/pyhf/commandline.py
@@ -4,7 +4,6 @@
import json
import os
-from . import writexml
from .utils import hypotest
from .pdf import Workspace
from .version import __version__
@@ -81,6 +80,7 @@
"xmlio extra: pip install pyhf[xmlio] or install uproot "
"manually: pip install uproot"
)
+ from . import writexml
ensure_dirs(output_dir)
with click.open_file(workspace, 'r') as specstream:
| {"golden_diff": "diff --git a/pyhf/commandline.py b/pyhf/commandline.py\n--- a/pyhf/commandline.py\n+++ b/pyhf/commandline.py\n@@ -4,7 +4,6 @@\n import json\n import os\n \n-from . import writexml\n from .utils import hypotest\n from .pdf import Workspace\n from .version import __version__\n@@ -81,6 +80,7 @@\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n+ from . import writexml\n \n ensure_dirs(output_dir)\n with click.open_file(workspace, 'r') as specstream:\n", "issue": "pyhf commandline tools requires uproot extra\n# Description\r\n\r\njust issuing `pyhf --help` requires the uproot extra since `commandline.py` imports `writexml`.\r\n\r\n# Expected Behavior\r\n\r\nI don't need uproot if I don't want to use json2xml or xml2json\r\n\r\n# Actual Behavior\r\n\r\nI can't use `pyhf` without installing uproot.\r\n\r\n# Steps to Reproduce\r\n\r\ninstall master and run pyhf\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "import logging\n\nimport click\nimport json\nimport os\n\nfrom . import writexml\nfrom .utils import hypotest\nfrom .pdf import Workspace\nfrom .version import __version__\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n# This is only needed for Python 2/3 compatibility\ndef ensure_dirs(path):\n try:\n os.makedirs(path, exist_ok=True)\n except TypeError:\n if not os.path.exists(path):\n os.makedirs(path)\n\n\[email protected](context_settings=dict(help_option_names=['-h', '--help']))\[email protected]_option(version=__version__)\ndef pyhf():\n pass\n\n\[email protected]()\[email protected]('entrypoint-xml', type=click.Path(exists=True))\[email protected](\n '--basedir',\n help='The base directory for the XML files to point relative to.',\n type=click.Path(exists=True),\n default=os.getcwd(),\n)\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--track-progress/--hide-progress', default=True)\ndef xml2json(entrypoint_xml, basedir, output_file, track_progress):\n \"\"\" Entrypoint XML: The top-level XML file for the PDF definition. \"\"\"\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"xml2json requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n from . 
import readxml\n\n spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)\n if output_file is None:\n print(json.dumps(spec, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(spec, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected]('--output-dir', type=click.Path(exists=True), default='.')\[email protected]('--specroot', default='config')\[email protected]('--dataroot', default='data')\[email protected]('--resultprefix', default='FitConfig')\ndef json2xml(workspace, output_dir, specroot, dataroot, resultprefix):\n try:\n import uproot\n\n assert uproot\n except ImportError:\n log.error(\n \"json2xml requires uproot, please install pyhf using the \"\n \"xmlio extra: pip install pyhf[xmlio] or install uproot \"\n \"manually: pip install uproot\"\n )\n\n ensure_dirs(output_dir)\n with click.open_file(workspace, 'r') as specstream:\n d = json.load(specstream)\n ensure_dirs(os.path.join(output_dir, specroot))\n ensure_dirs(os.path.join(output_dir, dataroot))\n with click.open_file(\n os.path.join(output_dir, '{0:s}.xml'.format(resultprefix)), 'w'\n ) as outstream:\n outstream.write(\n writexml.writexml(\n d,\n os.path.join(output_dir, specroot),\n os.path.join(output_dir, dataroot),\n resultprefix,\n ).decode('utf-8')\n )\n\n\[email protected]()\[email protected]('workspace', default='-')\[email protected](\n '--output-file',\n help='The location of the output json file. If not specified, prints to screen.',\n default=None,\n)\[email protected]('--measurement', default=None)\[email protected]('-p', '--patch', multiple=True)\[email protected]('--testpoi', default=1.0)\ndef cls(workspace, output_file, measurement, patch, testpoi):\n with click.open_file(workspace, 'r') as specstream:\n wspec = json.load(specstream)\n\n w = Workspace(wspec)\n\n patches = [json.loads(click.open_file(pfile, 'r').read()) for pfile in patch]\n p = w.model(measurement_name=measurement, patches=patches)\n result = hypotest(testpoi, w.data(p), p, return_expected_set=True)\n result = {'CLs_obs': result[0].tolist()[0], 'CLs_exp': result[-1].ravel().tolist()}\n if output_file is None:\n print(json.dumps(result, indent=4, sort_keys=True))\n else:\n with open(output_file, 'w+') as out_file:\n json.dump(result, out_file, indent=4, sort_keys=True)\n log.debug(\"Written to {0:s}\".format(output_file))\n", "path": "pyhf/commandline.py"}]} | 2,010 | 148 |
gh_patches_debug_1273 | rasdani/github-patches | git_diff | ivy-llc__ivy-17162 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
is_integer
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/attribute.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6
7
8 @to_ivy_arrays_and_back
9 def is_complex(x):
10 return ivy.is_complex_dtype(x)
11
12
13 @to_ivy_arrays_and_back
14 def is_floating_point(x):
15 return ivy.is_float_dtype(x)
16
[end of ivy/functional/frontends/paddle/tensor/attribute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/attribute.py b/ivy/functional/frontends/paddle/tensor/attribute.py
--- a/ivy/functional/frontends/paddle/tensor/attribute.py
+++ b/ivy/functional/frontends/paddle/tensor/attribute.py
@@ -10,6 +10,11 @@
return ivy.is_complex_dtype(x)
+@to_ivy_arrays_and_back
+def is_integer(x):
+ return ivy.is_int_dtype(x)
+
+
@to_ivy_arrays_and_back
def is_floating_point(x):
return ivy.is_float_dtype(x)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/attribute.py b/ivy/functional/frontends/paddle/tensor/attribute.py\n--- a/ivy/functional/frontends/paddle/tensor/attribute.py\n+++ b/ivy/functional/frontends/paddle/tensor/attribute.py\n@@ -10,6 +10,11 @@\n return ivy.is_complex_dtype(x)\n \n \n+@to_ivy_arrays_and_back\n+def is_integer(x):\n+ return ivy.is_int_dtype(x)\n+\n+\n @to_ivy_arrays_and_back\n def is_floating_point(x):\n return ivy.is_float_dtype(x)\n", "issue": "is_integer\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef is_complex(x):\n return ivy.is_complex_dtype(x)\n\n\n@to_ivy_arrays_and_back\ndef is_floating_point(x):\n return ivy.is_float_dtype(x)\n", "path": "ivy/functional/frontends/paddle/tensor/attribute.py"}]} | 655 | 138 |
gh_patches_debug_28376 | rasdani/github-patches | git_diff | iterative__dvc-4075 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement `--no-exec` option for `import-url` command
`dvc import-url` creates a new `.dvc` file, just as `dvc run` does. Sometimes the files that would be imported are already present locally, and it's quite inconvenient that they have to be downloaded again in order to create a pipeline step.
Because of that, it would be great to add a `--no-exec` option: we create the pipeline step, then use `dvc commit` to update its md5 with the already downloaded file.
</issue>
<code>
[start of dvc/repo/imp_url.py]
1 import os
2
3 from dvc.repo.scm_context import scm_context
4 from dvc.utils import relpath, resolve_output, resolve_paths
5 from dvc.utils.fs import path_isin
6
7 from ..exceptions import OutputDuplicationError
8 from . import locked
9
10
11 @locked
12 @scm_context
13 def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):
14 from dvc.dvcfile import Dvcfile
15 from dvc.stage import Stage, create_stage
16
17 out = resolve_output(url, out)
18 path, wdir, out = resolve_paths(self, out)
19
20 # NOTE: when user is importing something from within their own repository
21 if (
22 erepo is None
23 and os.path.exists(url)
24 and path_isin(os.path.abspath(url), self.root_dir)
25 ):
26 url = relpath(url, wdir)
27
28 stage = create_stage(
29 Stage,
30 self,
31 fname or path,
32 wdir=wdir,
33 deps=[url],
34 outs=[out],
35 erepo=erepo,
36 )
37
38 if stage is None:
39 return None
40
41 dvcfile = Dvcfile(self, stage.path)
42 dvcfile.remove()
43
44 try:
45 self.check_modified_graph([stage])
46 except OutputDuplicationError as exc:
47 raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})
48
49 stage.run()
50
51 stage.frozen = frozen
52
53 dvcfile.dump(stage)
54
55 return stage
56
[end of dvc/repo/imp_url.py]
[start of dvc/command/imp_url.py]
1 import argparse
2 import logging
3
4 from dvc.command import completion
5 from dvc.command.base import CmdBase, append_doc_link
6 from dvc.exceptions import DvcException
7
8 logger = logging.getLogger(__name__)
9
10
11 class CmdImportUrl(CmdBase):
12 def run(self):
13 try:
14 self.repo.imp_url(
15 self.args.url, out=self.args.out, fname=self.args.file
16 )
17 except DvcException:
18 logger.exception(
19 "failed to import {}. You could also try downloading "
20 "it manually, and adding it with `dvc add`.".format(
21 self.args.url
22 )
23 )
24 return 1
25 return 0
26
27
28 def add_parser(subparsers, parent_parser):
29 IMPORT_HELP = (
30 "Download or copy file from URL and take it under DVC control."
31 )
32
33 import_parser = subparsers.add_parser(
34 "import-url",
35 parents=[parent_parser],
36 description=append_doc_link(IMPORT_HELP, "import-url"),
37 help=IMPORT_HELP,
38 formatter_class=argparse.RawTextHelpFormatter,
39 )
40 import_parser.add_argument(
41 "url",
42 help="Supported urls:\n"
43 "/path/to/file\n"
44 "/path/to/directory\n"
45 "C:\\\\path\\to\\file\n"
46 "C:\\\\path\\to\\directory\n"
47 "https://example.com/path/to/file\n"
48 "s3://bucket/path/to/file\n"
49 "s3://bucket/path/to/directory\n"
50 "gs://bucket/path/to/file\n"
51 "gs://bucket/path/to/directory\n"
52 "hdfs://example.com/path/to/file\n"
53 "ssh://example.com:/path/to/file\n"
54 "ssh://example.com:/path/to/directory\n"
55 "remote://myremote/path/to/file (see `dvc remote`)",
56 )
57 import_parser.add_argument(
58 "out",
59 nargs="?",
60 help="Destination path to put files to.",
61 choices=completion.Optional.DIR,
62 )
63 import_parser.add_argument(
64 "--file",
65 help="Specify name of the DVC-file this command will generate.",
66 metavar="<filename>",
67 choices=completion.Optional.DIR,
68 )
69 import_parser.set_defaults(func=CmdImportUrl)
70
[end of dvc/command/imp_url.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/command/imp_url.py b/dvc/command/imp_url.py
--- a/dvc/command/imp_url.py
+++ b/dvc/command/imp_url.py
@@ -12,7 +12,10 @@
def run(self):
try:
self.repo.imp_url(
- self.args.url, out=self.args.out, fname=self.args.file
+ self.args.url,
+ out=self.args.out,
+ fname=self.args.file,
+ no_exec=self.args.no_exec,
)
except DvcException:
logger.exception(
@@ -66,4 +69,10 @@
metavar="<filename>",
choices=completion.Optional.DIR,
)
+ import_parser.add_argument(
+ "--no-exec",
+ action="store_true",
+ default=False,
+ help="Only create stage file without actually download it.",
+ )
import_parser.set_defaults(func=CmdImportUrl)
diff --git a/dvc/repo/imp_url.py b/dvc/repo/imp_url.py
--- a/dvc/repo/imp_url.py
+++ b/dvc/repo/imp_url.py
@@ -10,7 +10,9 @@
@locked
@scm_context
-def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):
+def imp_url(
+ self, url, out=None, fname=None, erepo=None, frozen=True, no_exec=False
+):
from dvc.dvcfile import Dvcfile
from dvc.stage import Stage, create_stage
@@ -46,7 +48,10 @@
except OutputDuplicationError as exc:
raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})
- stage.run()
+ if no_exec:
+ stage.ignore_outs()
+ else:
+ stage.run()
stage.frozen = frozen
| {"golden_diff": "diff --git a/dvc/command/imp_url.py b/dvc/command/imp_url.py\n--- a/dvc/command/imp_url.py\n+++ b/dvc/command/imp_url.py\n@@ -12,7 +12,10 @@\n def run(self):\n try:\n self.repo.imp_url(\n- self.args.url, out=self.args.out, fname=self.args.file\n+ self.args.url,\n+ out=self.args.out,\n+ fname=self.args.file,\n+ no_exec=self.args.no_exec,\n )\n except DvcException:\n logger.exception(\n@@ -66,4 +69,10 @@\n metavar=\"<filename>\",\n choices=completion.Optional.DIR,\n )\n+ import_parser.add_argument(\n+ \"--no-exec\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Only create stage file without actually download it.\",\n+ )\n import_parser.set_defaults(func=CmdImportUrl)\ndiff --git a/dvc/repo/imp_url.py b/dvc/repo/imp_url.py\n--- a/dvc/repo/imp_url.py\n+++ b/dvc/repo/imp_url.py\n@@ -10,7 +10,9 @@\n \n @locked\n @scm_context\n-def imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):\n+def imp_url(\n+ self, url, out=None, fname=None, erepo=None, frozen=True, no_exec=False\n+):\n from dvc.dvcfile import Dvcfile\n from dvc.stage import Stage, create_stage\n \n@@ -46,7 +48,10 @@\n except OutputDuplicationError as exc:\n raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})\n \n- stage.run()\n+ if no_exec:\n+ stage.ignore_outs()\n+ else:\n+ stage.run()\n \n stage.frozen = frozen\n", "issue": "Implement `--no-exec` option for `import-url` command\n`dvc import-url` creates new `.dvc` file, just as `dvc run`. Sometimes files which would be imported are already present locally and it's quite inconvenient that they should be downloaded again in order to create a pipeline step.\r\n\r\nBecause of that it would be great to add `--no-exec` option: we create pipeline step, then use `dvc commit` to update its md5 with already downloaded file.\n", "before_files": [{"content": "import os\n\nfrom dvc.repo.scm_context import scm_context\nfrom dvc.utils import relpath, resolve_output, resolve_paths\nfrom dvc.utils.fs import path_isin\n\nfrom ..exceptions import OutputDuplicationError\nfrom . import locked\n\n\n@locked\n@scm_context\ndef imp_url(self, url, out=None, fname=None, erepo=None, frozen=True):\n from dvc.dvcfile import Dvcfile\n from dvc.stage import Stage, create_stage\n\n out = resolve_output(url, out)\n path, wdir, out = resolve_paths(self, out)\n\n # NOTE: when user is importing something from within their own repository\n if (\n erepo is None\n and os.path.exists(url)\n and path_isin(os.path.abspath(url), self.root_dir)\n ):\n url = relpath(url, wdir)\n\n stage = create_stage(\n Stage,\n self,\n fname or path,\n wdir=wdir,\n deps=[url],\n outs=[out],\n erepo=erepo,\n )\n\n if stage is None:\n return None\n\n dvcfile = Dvcfile(self, stage.path)\n dvcfile.remove()\n\n try:\n self.check_modified_graph([stage])\n except OutputDuplicationError as exc:\n raise OutputDuplicationError(exc.output, set(exc.stages) - {stage})\n\n stage.run()\n\n stage.frozen = frozen\n\n dvcfile.dump(stage)\n\n return stage\n", "path": "dvc/repo/imp_url.py"}, {"content": "import argparse\nimport logging\n\nfrom dvc.command import completion\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdImportUrl(CmdBase):\n def run(self):\n try:\n self.repo.imp_url(\n self.args.url, out=self.args.out, fname=self.args.file\n )\n except DvcException:\n logger.exception(\n \"failed to import {}. 
You could also try downloading \"\n \"it manually, and adding it with `dvc add`.\".format(\n self.args.url\n )\n )\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n IMPORT_HELP = (\n \"Download or copy file from URL and take it under DVC control.\"\n )\n\n import_parser = subparsers.add_parser(\n \"import-url\",\n parents=[parent_parser],\n description=append_doc_link(IMPORT_HELP, \"import-url\"),\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n import_parser.add_argument(\n \"url\",\n help=\"Supported urls:\\n\"\n \"/path/to/file\\n\"\n \"/path/to/directory\\n\"\n \"C:\\\\\\\\path\\\\to\\\\file\\n\"\n \"C:\\\\\\\\path\\\\to\\\\directory\\n\"\n \"https://example.com/path/to/file\\n\"\n \"s3://bucket/path/to/file\\n\"\n \"s3://bucket/path/to/directory\\n\"\n \"gs://bucket/path/to/file\\n\"\n \"gs://bucket/path/to/directory\\n\"\n \"hdfs://example.com/path/to/file\\n\"\n \"ssh://example.com:/path/to/file\\n\"\n \"ssh://example.com:/path/to/directory\\n\"\n \"remote://myremote/path/to/file (see `dvc remote`)\",\n )\n import_parser.add_argument(\n \"out\",\n nargs=\"?\",\n help=\"Destination path to put files to.\",\n choices=completion.Optional.DIR,\n )\n import_parser.add_argument(\n \"--file\",\n help=\"Specify name of the DVC-file this command will generate.\",\n metavar=\"<filename>\",\n choices=completion.Optional.DIR,\n )\n import_parser.set_defaults(func=CmdImportUrl)\n", "path": "dvc/command/imp_url.py"}]} | 1,710 | 418 |
gh_patches_debug_37644 | rasdani/github-patches | git_diff | SigmaHQ__sigma-257 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add possibility to configure backend options in conversion configuration file
</issue>
<code>
[start of tools/sigma/backends/base.py]
1 # Output backends for sigmac
2 # Copyright 2016-2018 Thomas Patzke, Florian Roth
3
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Lesser General Public License as published by
6 # the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Lesser General Public License for more details.
13
14 # You should have received a copy of the GNU Lesser General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import sigma
18 from .mixins import RulenameCommentMixin, QuoteCharMixin
19
20 class BackendOptions(dict):
21 """Object contains all options that should be passed to the backend from command line (or other user interfaces)"""
22
23 def __init__(self, options):
24 """
25 Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:
26
27 * key=value: self{key} = value
28 * key: self{key} = True
29 """
30 if options == None:
31 return
32 for option in options:
33 parsed = option.split("=", 1)
34 try:
35 self[parsed[0]] = parsed[1]
36 except IndexError:
37 self[parsed[0]] = True
38
39 ### Generic backend base classes
40 class BaseBackend:
41 """Base class for all backends"""
42 identifier = "base"
43 active = False
44 index_field = None # field name that is used to address indices
45 file_list = None
46 options = tuple() # a list of tuples with following elements: option name, default value, help text, target attribute name (option name if None)
47
48 def __init__(self, sigmaconfig, backend_options=None):
49 """
50 Initialize backend. This gets a sigmaconfig object, which is notified about the used backend class by
51 passing the object instance to it.
52 """
53 super().__init__()
54 if not isinstance(sigmaconfig, (sigma.configuration.SigmaConfiguration, None)):
55 raise TypeError("SigmaConfiguration object expected")
56 self.backend_options = backend_options
57 self.sigmaconfig = sigmaconfig
58 self.sigmaconfig.set_backend(self)
59
60 # Parse options
61 for option, default_value, _, target in self.options:
62 if target is None:
63 target = option
64 setattr(self, target, self.backend_options.setdefault(option, default_value))
65
66 def generate(self, sigmaparser):
67 """Method is called for each sigma rule and receives the parsed rule (SigmaParser)"""
68 for parsed in sigmaparser.condparsed:
69 query = self.generateQuery(parsed)
70 before = self.generateBefore(parsed)
71 after = self.generateAfter(parsed)
72
73 result = ""
74 if before is not None:
75 result = before
76 if query is not None:
77 result += query
78 if after is not None:
79 result += after
80
81 return result
82
83 def generateQuery(self, parsed):
84 result = self.generateNode(parsed.parsedSearch)
85 if parsed.parsedAgg:
86 result += self.generateAggregation(parsed.parsedAgg)
87 return result
88
89 def generateNode(self, node):
90 if type(node) == sigma.parser.condition.ConditionAND:
91 return self.generateANDNode(node)
92 elif type(node) == sigma.parser.condition.ConditionOR:
93 return self.generateORNode(node)
94 elif type(node) == sigma.parser.condition.ConditionNOT:
95 return self.generateNOTNode(node)
96 elif type(node) == sigma.parser.condition.ConditionNULLValue:
97 return self.generateNULLValueNode(node)
98 elif type(node) == sigma.parser.condition.ConditionNotNULLValue:
99 return self.generateNotNULLValueNode(node)
100 elif type(node) == sigma.parser.condition.NodeSubexpression:
101 return self.generateSubexpressionNode(node)
102 elif type(node) == tuple:
103 return self.generateMapItemNode(node)
104 elif type(node) in (str, int):
105 return self.generateValueNode(node)
106 elif type(node) == list:
107 return self.generateListNode(node)
108 else:
109 raise TypeError("Node type %s was not expected in Sigma parse tree" % (str(type(node))))
110
111 def generateANDNode(self, node):
112 raise NotImplementedError("Node type not implemented for this backend")
113
114 def generateORNode(self, node):
115 raise NotImplementedError("Node type not implemented for this backend")
116
117 def generateNOTNode(self, node):
118 raise NotImplementedError("Node type not implemented for this backend")
119
120 def generateSubexpressionNode(self, node):
121 raise NotImplementedError("Node type not implemented for this backend")
122
123 def generateListNode(self, node):
124 raise NotImplementedError("Node type not implemented for this backend")
125
126 def generateMapItemNode(self, node):
127 raise NotImplementedError("Node type not implemented for this backend")
128
129 def generateValueNode(self, node):
130 raise NotImplementedError("Node type not implemented for this backend")
131
132 def generateNULLValueNode(self, node):
133 raise NotImplementedError("Node type not implemented for this backend")
134
135 def generateNotNULLValueNode(self, node):
136 raise NotImplementedError("Node type not implemented for this backend")
137
138 def generateAggregation(self, agg):
139 raise NotImplementedError("Aggregations not implemented for this backend")
140
141 def generateBefore(self, parsed):
142 return ""
143
144 def generateAfter(self, parsed):
145 return ""
146
147 def finalize(self):
148 """
149 Is called after the last file was processed with generate(). The right place if this backend is not intended to
150 look isolated at each rule, but generates an output which incorporates multiple rules, e.g. dashboards.
151 """
152 pass
153
154 class SingleTextQueryBackend(RulenameCommentMixin, BaseBackend, QuoteCharMixin):
155 """Base class for backends that generate one text-based expression from a Sigma rule"""
156 identifier = "base-textquery"
157 active = False
158
159 # the following class variables define the generation and behavior of queries from a parse tree some are prefilled with default values that are quite usual
160 andToken = None # Token used for linking expressions with logical AND
161 orToken = None # Same for OR
162 notToken = None # Same for NOT
163 subExpression = None # Syntax for subexpressions, usually parenthesis around it. %s is inner expression
164 listExpression = None # Syntax for lists, %s are list items separated with listSeparator
165 listSeparator = None # Character for separation of list items
166 valueExpression = None # Expression of values, %s represents value
167 nullExpression = None # Expression of queries for null values or non-existing fields. %s is field name
168 notNullExpression = None # Expression of queries for not null values. %s is field name
169 mapExpression = None # Syntax for field/value conditions. First %s is fieldname, second is value
170 mapListsSpecialHandling = False # Same handling for map items with list values as for normal values (strings, integers) if True, generateMapItemListNode method is called with node
171 mapListValueExpression = None # Syntax for field/value condititons where map value is a list
172
173 def generateANDNode(self, node):
174 generated = [ self.generateNode(val) for val in node ]
175 filtered = [ g for g in generated if g is not None ]
176 if filtered:
177 return self.andToken.join(filtered)
178 else:
179 return None
180
181 def generateORNode(self, node):
182 generated = [ self.generateNode(val) for val in node ]
183 filtered = [ g for g in generated if g is not None ]
184 if filtered:
185 return self.orToken.join(filtered)
186 else:
187 return None
188
189 def generateNOTNode(self, node):
190 generated = self.generateNode(node.item)
191 if generated is not None:
192 return self.notToken + generated
193 else:
194 return None
195
196 def generateSubexpressionNode(self, node):
197 generated = self.generateNode(node.items)
198 if generated:
199 return self.subExpression % generated
200 else:
201 return None
202
203 def generateListNode(self, node):
204 if not set([type(value) for value in node]).issubset({str, int}):
205 raise TypeError("List values must be strings or numbers")
206 return self.listExpression % (self.listSeparator.join([self.generateNode(value) for value in node]))
207
208 def generateMapItemNode(self, node):
209 fieldname, value = node
210
211 transformed_fieldname = self.fieldNameMapping(fieldname, value)
212 if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):
213 return self.mapExpression % (transformed_fieldname, self.generateNode(value))
214 elif type(value) == list:
215 return self.generateMapItemListNode(transformed_fieldname, value)
216 else:
217 raise TypeError("Backend does not support map values of type " + str(type(value)))
218
219 def generateMapItemListNode(self, fieldname, value):
220 return self.mapListValueExpression % (fieldname, self.generateNode(value))
221
222 def generateValueNode(self, node):
223 return self.valueExpression % (self.cleanValue(str(node)))
224
225 def generateNULLValueNode(self, node):
226 return self.nullExpression % (node.item)
227
228 def generateNotNULLValueNode(self, node):
229 return self.notNullExpression % (node.item)
230
231 def fieldNameMapping(self, fieldname, value):
232 """
233 Alter field names depending on the value(s). Backends may use this method to perform a final transformation of the field name
234 in addition to the field mapping defined in the conversion configuration. The field name passed to this method was already
235 transformed from the original name given in the Sigma rule.
236 """
237 return fieldname
238
[end of tools/sigma/backends/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/sigma/backends/base.py b/tools/sigma/backends/base.py
--- a/tools/sigma/backends/base.py
+++ b/tools/sigma/backends/base.py
@@ -14,28 +14,73 @@
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import sys
+
import sigma
+import yaml
+
from .mixins import RulenameCommentMixin, QuoteCharMixin
+
class BackendOptions(dict):
- """Object contains all options that should be passed to the backend from command line (or other user interfaces)"""
+ """
+ Object containing all the options that should be passed to the backend.
+
+ The options can come from command line and a YAML configuration file, and will be merged together.
+ Options from the command line take precedence.
+ """
+
+ def __init__(self, options, config_file):
+ """
+ :param options: unparsed options coming from the CLI
+ :param config_file: path to a YAML configuration file
+ """
+
+ self._load_config_file(config_file)
+ self._parse_options(options)
- def __init__(self, options):
+ def _parse_options(self, options):
"""
- Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:
+ Populates options from the unparsed options of the CLI
- * key=value: self{key} = value
- * key: self{key} = True
+ :param options: list unparsed options from the CLI.
+ Each option can have one of the following formats:
+ - "key=value": the option key:value will be passed to the backend
+ - "key": the option key:True will be passed to the backend
"""
- if options == None:
+
+ if options is None:
return
+
for option in options:
parsed = option.split("=", 1)
try:
self[parsed[0]] = parsed[1]
except IndexError:
+ # If the option is present but doesn't map to a value, treat it as a boolean flag
self[parsed[0]] = True
+ def _load_config_file(self, path):
+ """
+ Populates options from a configuration file
+
+ :param path: Path to the configuration file
+ """
+ if path is None:
+ return
+
+ try:
+ with open(path, 'r') as config_file:
+ backend_config = yaml.safe_load(config_file.read())
+ for key in backend_config:
+ self[key] = backend_config[key]
+ except (IOError, OSError) as e:
+ print("Failed to open backend configuration file '%s': %s" % (path, str(e)), file=sys.stderr)
+ exit(1)
+ except yaml.YAMLError as e:
+ print("Failed to parse backend configuration file '%s' as valid YAML: %s" % (path, str(e)), file=sys.stderr)
+ exit(1)
+
### Generic backend base classes
class BaseBackend:
"""Base class for all backends"""
| {"golden_diff": "diff --git a/tools/sigma/backends/base.py b/tools/sigma/backends/base.py\n--- a/tools/sigma/backends/base.py\n+++ b/tools/sigma/backends/base.py\n@@ -14,28 +14,73 @@\n # You should have received a copy of the GNU Lesser General Public License\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n \n+import sys\n+\n import sigma\n+import yaml\n+\n from .mixins import RulenameCommentMixin, QuoteCharMixin\n \n+\n class BackendOptions(dict):\n- \"\"\"Object contains all options that should be passed to the backend from command line (or other user interfaces)\"\"\"\n+ \"\"\"\n+ Object containing all the options that should be passed to the backend.\n+ \n+ The options can come from command line and a YAML configuration file, and will be merged together.\n+ Options from the command line take precedence.\n+ \"\"\"\n+\n+ def __init__(self, options, config_file):\n+ \"\"\"\n+ :param options: unparsed options coming from the CLI\n+ :param config_file: path to a YAML configuration file\n+ \"\"\"\n+\n+ self._load_config_file(config_file)\n+ self._parse_options(options)\n \n- def __init__(self, options):\n+ def _parse_options(self, options):\n \"\"\"\n- Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:\n+ Populates options from the unparsed options of the CLI\n \n- * key=value: self{key} = value\n- * key: self{key} = True\n+ :param options: list unparsed options from the CLI.\n+ Each option can have one of the following formats:\n+ - \"key=value\": the option key:value will be passed to the backend\n+ - \"key\": the option key:True will be passed to the backend\n \"\"\"\n- if options == None:\n+\n+ if options is None:\n return\n+\n for option in options:\n parsed = option.split(\"=\", 1)\n try:\n self[parsed[0]] = parsed[1]\n except IndexError:\n+ # If the option is present but doesn't map to a value, treat it as a boolean flag\n self[parsed[0]] = True\n \n+ def _load_config_file(self, path):\n+ \"\"\"\n+ Populates options from a configuration file\n+\n+ :param path: Path to the configuration file\n+ \"\"\"\n+ if path is None:\n+ return\n+\n+ try:\n+ with open(path, 'r') as config_file:\n+ backend_config = yaml.safe_load(config_file.read())\n+ for key in backend_config:\n+ self[key] = backend_config[key]\n+ except (IOError, OSError) as e:\n+ print(\"Failed to open backend configuration file '%s': %s\" % (path, str(e)), file=sys.stderr)\n+ exit(1)\n+ except yaml.YAMLError as e:\n+ print(\"Failed to parse backend configuration file '%s' as valid YAML: %s\" % (path, str(e)), file=sys.stderr)\n+ exit(1)\n+\n ### Generic backend base classes\n class BaseBackend:\n \"\"\"Base class for all backends\"\"\"\n", "issue": "Add possibility to configure backend options in conversion configuration file\n\n", "before_files": [{"content": "# Output backends for sigmac\n# Copyright 2016-2018 Thomas Patzke, Florian Roth\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser General Public License for more details.\n\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport sigma\nfrom .mixins import RulenameCommentMixin, QuoteCharMixin\n\nclass BackendOptions(dict):\n \"\"\"Object contains all options that should be passed to the backend from command line (or other user interfaces)\"\"\"\n\n def __init__(self, options):\n \"\"\"\n Receives the argparser result from the backend option paramater value list (nargs=*) and builds the dict from it. There are two option types:\n\n * key=value: self{key} = value\n * key: self{key} = True\n \"\"\"\n if options == None:\n return\n for option in options:\n parsed = option.split(\"=\", 1)\n try:\n self[parsed[0]] = parsed[1]\n except IndexError:\n self[parsed[0]] = True\n\n### Generic backend base classes\nclass BaseBackend:\n \"\"\"Base class for all backends\"\"\"\n identifier = \"base\"\n active = False\n index_field = None # field name that is used to address indices\n file_list = None\n options = tuple() # a list of tuples with following elements: option name, default value, help text, target attribute name (option name if None)\n\n def __init__(self, sigmaconfig, backend_options=None):\n \"\"\"\n Initialize backend. This gets a sigmaconfig object, which is notified about the used backend class by\n passing the object instance to it.\n \"\"\"\n super().__init__()\n if not isinstance(sigmaconfig, (sigma.configuration.SigmaConfiguration, None)):\n raise TypeError(\"SigmaConfiguration object expected\")\n self.backend_options = backend_options\n self.sigmaconfig = sigmaconfig\n self.sigmaconfig.set_backend(self)\n\n # Parse options\n for option, default_value, _, target in self.options:\n if target is None:\n target = option\n setattr(self, target, self.backend_options.setdefault(option, default_value))\n\n def generate(self, sigmaparser):\n \"\"\"Method is called for each sigma rule and receives the parsed rule (SigmaParser)\"\"\"\n for parsed in sigmaparser.condparsed:\n query = self.generateQuery(parsed)\n before = self.generateBefore(parsed)\n after = self.generateAfter(parsed)\n\n result = \"\"\n if before is not None:\n result = before\n if query is not None:\n result += query\n if after is not None:\n result += after\n\n return result\n\n def generateQuery(self, parsed):\n result = self.generateNode(parsed.parsedSearch)\n if parsed.parsedAgg:\n result += self.generateAggregation(parsed.parsedAgg)\n return result\n\n def generateNode(self, node):\n if type(node) == sigma.parser.condition.ConditionAND:\n return self.generateANDNode(node)\n elif type(node) == sigma.parser.condition.ConditionOR:\n return self.generateORNode(node)\n elif type(node) == sigma.parser.condition.ConditionNOT:\n return self.generateNOTNode(node)\n elif type(node) == sigma.parser.condition.ConditionNULLValue:\n return self.generateNULLValueNode(node)\n elif type(node) == sigma.parser.condition.ConditionNotNULLValue:\n return self.generateNotNULLValueNode(node)\n elif type(node) == sigma.parser.condition.NodeSubexpression:\n return self.generateSubexpressionNode(node)\n elif type(node) == tuple:\n return self.generateMapItemNode(node)\n elif type(node) in (str, int):\n return self.generateValueNode(node)\n elif type(node) == list:\n return self.generateListNode(node)\n else:\n raise TypeError(\"Node type %s was not expected in Sigma parse tree\" % (str(type(node))))\n\n def generateANDNode(self, node):\n raise 
NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateORNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNOTNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateSubexpressionNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateListNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateMapItemNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNULLValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateNotNULLValueNode(self, node):\n raise NotImplementedError(\"Node type not implemented for this backend\")\n\n def generateAggregation(self, agg):\n raise NotImplementedError(\"Aggregations not implemented for this backend\")\n\n def generateBefore(self, parsed):\n return \"\"\n\n def generateAfter(self, parsed):\n return \"\"\n\n def finalize(self):\n \"\"\"\n Is called after the last file was processed with generate(). The right place if this backend is not intended to\n look isolated at each rule, but generates an output which incorporates multiple rules, e.g. dashboards.\n \"\"\"\n pass\n\nclass SingleTextQueryBackend(RulenameCommentMixin, BaseBackend, QuoteCharMixin):\n \"\"\"Base class for backends that generate one text-based expression from a Sigma rule\"\"\"\n identifier = \"base-textquery\"\n active = False\n\n # the following class variables define the generation and behavior of queries from a parse tree some are prefilled with default values that are quite usual\n andToken = None # Token used for linking expressions with logical AND\n orToken = None # Same for OR\n notToken = None # Same for NOT\n subExpression = None # Syntax for subexpressions, usually parenthesis around it. %s is inner expression\n listExpression = None # Syntax for lists, %s are list items separated with listSeparator\n listSeparator = None # Character for separation of list items\n valueExpression = None # Expression of values, %s represents value\n nullExpression = None # Expression of queries for null values or non-existing fields. %s is field name\n notNullExpression = None # Expression of queries for not null values. %s is field name\n mapExpression = None # Syntax for field/value conditions. 
First %s is fieldname, second is value\n mapListsSpecialHandling = False # Same handling for map items with list values as for normal values (strings, integers) if True, generateMapItemListNode method is called with node\n mapListValueExpression = None # Syntax for field/value condititons where map value is a list\n\n def generateANDNode(self, node):\n generated = [ self.generateNode(val) for val in node ]\n filtered = [ g for g in generated if g is not None ]\n if filtered:\n return self.andToken.join(filtered)\n else:\n return None\n\n def generateORNode(self, node):\n generated = [ self.generateNode(val) for val in node ]\n filtered = [ g for g in generated if g is not None ]\n if filtered:\n return self.orToken.join(filtered)\n else:\n return None\n\n def generateNOTNode(self, node):\n generated = self.generateNode(node.item)\n if generated is not None:\n return self.notToken + generated\n else:\n return None\n\n def generateSubexpressionNode(self, node):\n generated = self.generateNode(node.items)\n if generated:\n return self.subExpression % generated\n else:\n return None\n\n def generateListNode(self, node):\n if not set([type(value) for value in node]).issubset({str, int}):\n raise TypeError(\"List values must be strings or numbers\")\n return self.listExpression % (self.listSeparator.join([self.generateNode(value) for value in node]))\n\n def generateMapItemNode(self, node):\n fieldname, value = node\n\n transformed_fieldname = self.fieldNameMapping(fieldname, value)\n if self.mapListsSpecialHandling == False and type(value) in (str, int, list) or self.mapListsSpecialHandling == True and type(value) in (str, int):\n return self.mapExpression % (transformed_fieldname, self.generateNode(value))\n elif type(value) == list:\n return self.generateMapItemListNode(transformed_fieldname, value)\n else:\n raise TypeError(\"Backend does not support map values of type \" + str(type(value)))\n\n def generateMapItemListNode(self, fieldname, value):\n return self.mapListValueExpression % (fieldname, self.generateNode(value))\n\n def generateValueNode(self, node):\n return self.valueExpression % (self.cleanValue(str(node)))\n\n def generateNULLValueNode(self, node):\n return self.nullExpression % (node.item)\n\n def generateNotNULLValueNode(self, node):\n return self.notNullExpression % (node.item)\n\n def fieldNameMapping(self, fieldname, value):\n \"\"\"\n Alter field names depending on the value(s). Backends may use this method to perform a final transformation of the field name\n in addition to the field mapping defined in the conversion configuration. The field name passed to this method was already\n transformed from the original name given in the Sigma rule.\n \"\"\"\n return fieldname\n", "path": "tools/sigma/backends/base.py"}]} | 3,306 | 726 |
gh_patches_debug_50783 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3282 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in NL API, 'Sentiment' object has no attribute 'score'
Upon executing the following code on GCE and locally, I get a 'score' not found error locally, while it works on GCE.
```
def analyze_text(text):
language_client = language.Client()
document = language_client.document_from_text(text)
annotations = document.annotate_text(include_sentiment=True)
print(annotations.sentiment.score)
print (annotations.sentiment.magnitude)
```
The only difference I can find is that the gcloud version on GCE differs from the local one:
**GCE gcloud**
gcloud --version
Google Cloud SDK 145.0.0
alpha 2017.02.21
app-engine-python 1.9.50
beta 2017.02.21
bq 2.0.24
bq-nix 2.0.24
core 2017.02.21
core-nix 2017.02.21
gcloud
gcloud-deps 2017.02.21
gcloud-deps-linux-x86_64 2017.02.21
gsutil 4.22
gsutil-nix 4.22
**Local gcloud**
gcloud --version
Google Cloud SDK 148.0.1
bq 2.0.24
bq-nix 2.0.24
core 2017.03.24
core-nix 2016.11.07
gcloud
gcloud-deps 2017.03.17
gcloud-deps-darwin-x86_64 2017.02.21
gsutil 4.23
gsutil-nix 4.19
Debugging locally, the response from the NL API consists of a polarity property and not score.
<img width="761" alt="image" src="https://cloud.githubusercontent.com/assets/1888535/24331449/62cbdbee-1252-11e7-9d29-8699df078fcf.png">
</issue>
<code>
[start of language/google/cloud/language/sentiment.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Definition for Google Cloud Natural Language API sentiment.
16
17 Sentiment is the response to an ``analyzeSentiment`` request.
18 """
19
20
21 class Sentiment(object):
22 """A Google Cloud Natural Language API sentiment object.
23
24 .. _Sentiment message: https://cloud.google.com/natural-language/\
25 reference/rest/v1/Sentiment
26 .. _Sentiment basics: https://cloud.google.com/natural-language/\
27 docs/basics#sentiment-analysis-values
28
29 See `Sentiment message`_ and `Sentiment basics`_.
30
31 :type score: float
32 :param score: Score of the sentiment in the ``[-1.0, 1.0]`` range.
33 Larger numbers represent more positive sentiments.
34
35 :type magnitude: float
36 :param magnitude: A non-negative number in the ``[0, +inf)`` range, which
37 represents the absolute magnitude of sentiment
38 regardless of score (positive or negative).
39 """
40
41 def __init__(self, score, magnitude):
42 self.score = score
43 self.magnitude = magnitude
44
45 @classmethod
46 def from_api_repr(cls, payload):
47 """Convert a Sentiment from the JSON API into a :class:`Sentiment`.
48
49 :param payload: dict
50 :type payload: The value from the backend.
51
52 :rtype: :class:`Sentiment`
53 :returns: The sentiment parsed from the API representation.
54 """
55 score = payload['score']
56 magnitude = payload['magnitude']
57 return cls(score, magnitude)
58
[end of language/google/cloud/language/sentiment.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/language/google/cloud/language/sentiment.py b/language/google/cloud/language/sentiment.py
--- a/language/google/cloud/language/sentiment.py
+++ b/language/google/cloud/language/sentiment.py
@@ -52,6 +52,6 @@
:rtype: :class:`Sentiment`
:returns: The sentiment parsed from the API representation.
"""
- score = payload['score']
+ score = payload.get('score', None)
magnitude = payload['magnitude']
return cls(score, magnitude)
| {"golden_diff": "diff --git a/language/google/cloud/language/sentiment.py b/language/google/cloud/language/sentiment.py\n--- a/language/google/cloud/language/sentiment.py\n+++ b/language/google/cloud/language/sentiment.py\n@@ -52,6 +52,6 @@\n :rtype: :class:`Sentiment`\n :returns: The sentiment parsed from the API representation.\n \"\"\"\n- score = payload['score']\n+ score = payload.get('score', None)\n magnitude = payload['magnitude']\n return cls(score, magnitude)\n", "issue": "Error in NL API, 'Sentiment' object has no attribute 'score'\nUpon executing the following code on GCE & locally I'm getting 'score' not found error locally while it works on GCE.\r\n\r\n```\r\ndef analyze_text(text):\r\n language_client = language.Client()\r\n document = language_client.document_from_text(text)\r\n annotations = document.annotate_text(include_sentiment=True)\r\n print(annotations.sentiment.score)\r\n print (annotations.sentiment.magnitude)\r\n```\r\n\r\nOnly difference I can find is on GCE, the gcloud version is different than the local\r\n**GCE gcloud**\r\ngcloud --version\r\nGoogle Cloud SDK 145.0.0\r\nalpha 2017.02.21\r\napp-engine-python 1.9.50\r\nbeta 2017.02.21\r\nbq 2.0.24\r\nbq-nix 2.0.24\r\ncore 2017.02.21\r\ncore-nix 2017.02.21\r\ngcloud \r\ngcloud-deps 2017.02.21\r\ngcloud-deps-linux-x86_64 2017.02.21\r\ngsutil 4.22\r\ngsutil-nix 4.22\r\n\r\n**Local gcloud**\r\ngcloud --version\r\nGoogle Cloud SDK 148.0.1\r\nbq 2.0.24\r\nbq-nix 2.0.24\r\ncore 2017.03.24\r\ncore-nix 2016.11.07\r\ngcloud \r\ngcloud-deps 2017.03.17\r\ngcloud-deps-darwin-x86_64 2017.02.21\r\ngsutil 4.23\r\ngsutil-nix 4.19\r\n\r\nDebugging locally, the response from NL API consist of polarity property and not score.\r\n\r\n<img width=\"761\" alt=\"image\" src=\"https://cloud.githubusercontent.com/assets/1888535/24331449/62cbdbee-1252-11e7-9d29-8699df078fcf.png\">\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API sentiment.\n\nSentiment is the response to an ``analyzeSentiment`` request.\n\"\"\"\n\n\nclass Sentiment(object):\n \"\"\"A Google Cloud Natural Language API sentiment object.\n\n .. _Sentiment message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Sentiment\n .. 
_Sentiment basics: https://cloud.google.com/natural-language/\\\n docs/basics#sentiment-analysis-values\n\n See `Sentiment message`_ and `Sentiment basics`_.\n\n :type score: float\n :param score: Score of the sentiment in the ``[-1.0, 1.0]`` range.\n Larger numbers represent more positive sentiments.\n\n :type magnitude: float\n :param magnitude: A non-negative number in the ``[0, +inf)`` range, which\n represents the absolute magnitude of sentiment\n regardless of score (positive or negative).\n \"\"\"\n\n def __init__(self, score, magnitude):\n self.score = score\n self.magnitude = magnitude\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert a Sentiment from the JSON API into a :class:`Sentiment`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Sentiment`\n :returns: The sentiment parsed from the API representation.\n \"\"\"\n score = payload['score']\n magnitude = payload['magnitude']\n return cls(score, magnitude)\n", "path": "language/google/cloud/language/sentiment.py"}]} | 1,614 | 113 |
gh_patches_debug_7619 | rasdani/github-patches | git_diff | pyca__cryptography-2682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Any interest for an override parameter for _MAX_CLOCK_SKEW?
It seems like a narrow assumption that the actors communicating via Fernet-encrypted messages will always have somewhat synchronized clocks. In our case, we have no way to amend the clocks of the systems our software runs on, and they often differ by at least minutes, if not hours or days.
Our current solution is to just stomp on `cryptography.fernet._MAX_CLOCK_SKEW` and set it to some `LARGE_VALUE`. Not ideal, but it gets the job done for now.
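
Concretely, the stomp described above amounts to roughly this (the constant chosen to stand in for `LARGE_VALUE` here is arbitrary):

```python
import cryptography.fernet as fernet_module

# Effectively disables the freshness check; 10**9 seconds stands in for LARGE_VALUE.
fernet_module._MAX_CLOCK_SKEW = 10**9
```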
I can think of several options to improve the situation here, but most of them involve changes to this library's code.
1) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` function to disable the clock skew check altogether.
2) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` that defaults to `_MAX_CLOCK_SKEW` and use that value for the clock skew check.
3) Add ability to set an offset on the Fernet object (or pass it in to `decrypt()`) that would be applied to the extracted timestamp prior to being checked against `_MAX_CLOCK_SKEW`. It would be the library user's responsibility for determining the appropriate offset value for each external actor.
4) I could subclass Fernet and implement some appropriate behavior in my new class (would essentially involve copying the whole `decrypt()` function into my codebase just to tweak this specific portion's behavior)
Would any of options 1-3 be worth pursuing to the point of a pull-request? I can probably make a pretty good effort on each of them, but I don't want to spend the time if the maintainers don't think any are worth attempting.
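
To make option 2 concrete, a rough sketch of the timestamp check with the skew bound as a parameter (hypothetical names and signature, not the library's API):

```python
from cryptography.fernet import InvalidToken

_MAX_CLOCK_SKEW = 60  # current module-level default


def check_token_age(timestamp, current_time, ttl=None, max_clock_skew=_MAX_CLOCK_SKEW):
    # Same logic decrypt() applies today, but the skew bound is caller-supplied.
    if ttl is not None and timestamp + ttl < current_time:
        raise InvalidToken
    if current_time + max_clock_skew < timestamp:
        raise InvalidToken
```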
</issue>
<code>
[start of src/cryptography/fernet.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import base64
8 import binascii
9 import os
10 import struct
11 import time
12
13 import six
14
15 from cryptography.exceptions import InvalidSignature
16 from cryptography.hazmat.backends import default_backend
17 from cryptography.hazmat.primitives import hashes, padding
18 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
19 from cryptography.hazmat.primitives.hmac import HMAC
20
21
22 class InvalidToken(Exception):
23 pass
24
25
26 _MAX_CLOCK_SKEW = 60
27
28
29 class Fernet(object):
30 def __init__(self, key, backend=None):
31 if backend is None:
32 backend = default_backend()
33
34 key = base64.urlsafe_b64decode(key)
35 if len(key) != 32:
36 raise ValueError(
37 "Fernet key must be 32 url-safe base64-encoded bytes."
38 )
39
40 self._signing_key = key[:16]
41 self._encryption_key = key[16:]
42 self._backend = backend
43
44 @classmethod
45 def generate_key(cls):
46 return base64.urlsafe_b64encode(os.urandom(32))
47
48 def encrypt(self, data):
49 current_time = int(time.time())
50 iv = os.urandom(16)
51 return self._encrypt_from_parts(data, current_time, iv)
52
53 def _encrypt_from_parts(self, data, current_time, iv):
54 if not isinstance(data, bytes):
55 raise TypeError("data must be bytes.")
56
57 padder = padding.PKCS7(algorithms.AES.block_size).padder()
58 padded_data = padder.update(data) + padder.finalize()
59 encryptor = Cipher(
60 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend
61 ).encryptor()
62 ciphertext = encryptor.update(padded_data) + encryptor.finalize()
63
64 basic_parts = (
65 b"\x80" + struct.pack(">Q", current_time) + iv + ciphertext
66 )
67
68 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
69 h.update(basic_parts)
70 hmac = h.finalize()
71 return base64.urlsafe_b64encode(basic_parts + hmac)
72
73 def decrypt(self, token, ttl=None):
74 if not isinstance(token, bytes):
75 raise TypeError("token must be bytes.")
76
77 current_time = int(time.time())
78
79 try:
80 data = base64.urlsafe_b64decode(token)
81 except (TypeError, binascii.Error):
82 raise InvalidToken
83
84 if not data or six.indexbytes(data, 0) != 0x80:
85 raise InvalidToken
86
87 try:
88 timestamp, = struct.unpack(">Q", data[1:9])
89 except struct.error:
90 raise InvalidToken
91 if ttl is not None:
92 if timestamp + ttl < current_time:
93 raise InvalidToken
94 if current_time + _MAX_CLOCK_SKEW < timestamp:
95 raise InvalidToken
96 h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
97 h.update(data[:-32])
98 try:
99 h.verify(data[-32:])
100 except InvalidSignature:
101 raise InvalidToken
102
103 iv = data[9:25]
104 ciphertext = data[25:-32]
105 decryptor = Cipher(
106 algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend
107 ).decryptor()
108 plaintext_padded = decryptor.update(ciphertext)
109 try:
110 plaintext_padded += decryptor.finalize()
111 except ValueError:
112 raise InvalidToken
113 unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
114
115 unpadded = unpadder.update(plaintext_padded)
116 try:
117 unpadded += unpadder.finalize()
118 except ValueError:
119 raise InvalidToken
120 return unpadded
121
122
123 class MultiFernet(object):
124 def __init__(self, fernets):
125 fernets = list(fernets)
126 if not fernets:
127 raise ValueError(
128 "MultiFernet requires at least one Fernet instance"
129 )
130 self._fernets = fernets
131
132 def encrypt(self, msg):
133 return self._fernets[0].encrypt(msg)
134
135 def decrypt(self, msg, ttl=None):
136 for f in self._fernets:
137 try:
138 return f.decrypt(msg, ttl)
139 except InvalidToken:
140 pass
141 raise InvalidToken
142
[end of src/cryptography/fernet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/fernet.py b/src/cryptography/fernet.py
--- a/src/cryptography/fernet.py
+++ b/src/cryptography/fernet.py
@@ -91,8 +91,10 @@
if ttl is not None:
if timestamp + ttl < current_time:
raise InvalidToken
- if current_time + _MAX_CLOCK_SKEW < timestamp:
- raise InvalidToken
+
+ if current_time + _MAX_CLOCK_SKEW < timestamp:
+ raise InvalidToken
+
h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)
h.update(data[:-32])
try:
| {"golden_diff": "diff --git a/src/cryptography/fernet.py b/src/cryptography/fernet.py\n--- a/src/cryptography/fernet.py\n+++ b/src/cryptography/fernet.py\n@@ -91,8 +91,10 @@\n if ttl is not None:\n if timestamp + ttl < current_time:\n raise InvalidToken\n- if current_time + _MAX_CLOCK_SKEW < timestamp:\n- raise InvalidToken\n+\n+ if current_time + _MAX_CLOCK_SKEW < timestamp:\n+ raise InvalidToken\n+\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(data[:-32])\n try:\n", "issue": "Any interest for an override parameter for _MAX_CLOCK_SKEW?\nIt seems like a narrow assumption to assume that the actors communicating via Fernet-encrypted messages will always be able to have somewhat synchronized clocks. In our case, we have no way to amend the clocks of the systems upon which our software runs and they often differ by at least minutes, if not hours or days.\n\nOur current solution is to just stomp on `cryptography.fernet._MAX_CLOCK_SKEW` and set it to some `LARGE_VALUE`. Not ideal, but it gets the job done for now.\n\nI can think of several options to improve the situation here, but most of them involve changes to this library's code.\n\n1) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` function to disable the clock skew check altogether.\n2) Add an optional parameter to Fernet's `__init__()` function or `decrypt()` that defaults to `_MAX_CLOCK_SKEW` and use that value for the clock skew check.\n3) Add ability to set an offset on the Fernet object (or pass it in to `decrypt()`) that would be applied to the extracted timestamp prior to being checked against `_MAX_CLOCK_SKEW`. It would be the library user's responsibility for determining the appropriate offset value for each external actor.\n4) I could subclass Fernet and implement some appropriate behavior in my new class (would essentially involve copying the whole `decrypt()` function into my codebase just to tweak this specific portion's behavior)\n\nWould any of options 1-3 be worth pursuing to the point of a pull-request? I can probably make a pretty good effort on each of them, but I don't want to spend the time if the maintainers don't think any are worth attempting.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport base64\nimport binascii\nimport os\nimport struct\nimport time\n\nimport six\n\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes, padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass InvalidToken(Exception):\n pass\n\n\n_MAX_CLOCK_SKEW = 60\n\n\nclass Fernet(object):\n def __init__(self, key, backend=None):\n if backend is None:\n backend = default_backend()\n\n key = base64.urlsafe_b64decode(key)\n if len(key) != 32:\n raise ValueError(\n \"Fernet key must be 32 url-safe base64-encoded bytes.\"\n )\n\n self._signing_key = key[:16]\n self._encryption_key = key[16:]\n self._backend = backend\n\n @classmethod\n def generate_key(cls):\n return base64.urlsafe_b64encode(os.urandom(32))\n\n def encrypt(self, data):\n current_time = int(time.time())\n iv = os.urandom(16)\n return self._encrypt_from_parts(data, current_time, iv)\n\n def _encrypt_from_parts(self, data, current_time, iv):\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n\n padder = padding.PKCS7(algorithms.AES.block_size).padder()\n padded_data = padder.update(data) + padder.finalize()\n encryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).encryptor()\n ciphertext = encryptor.update(padded_data) + encryptor.finalize()\n\n basic_parts = (\n b\"\\x80\" + struct.pack(\">Q\", current_time) + iv + ciphertext\n )\n\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(basic_parts)\n hmac = h.finalize()\n return base64.urlsafe_b64encode(basic_parts + hmac)\n\n def decrypt(self, token, ttl=None):\n if not isinstance(token, bytes):\n raise TypeError(\"token must be bytes.\")\n\n current_time = int(time.time())\n\n try:\n data = base64.urlsafe_b64decode(token)\n except (TypeError, binascii.Error):\n raise InvalidToken\n\n if not data or six.indexbytes(data, 0) != 0x80:\n raise InvalidToken\n\n try:\n timestamp, = struct.unpack(\">Q\", data[1:9])\n except struct.error:\n raise InvalidToken\n if ttl is not None:\n if timestamp + ttl < current_time:\n raise InvalidToken\n if current_time + _MAX_CLOCK_SKEW < timestamp:\n raise InvalidToken\n h = HMAC(self._signing_key, hashes.SHA256(), backend=self._backend)\n h.update(data[:-32])\n try:\n h.verify(data[-32:])\n except InvalidSignature:\n raise InvalidToken\n\n iv = data[9:25]\n ciphertext = data[25:-32]\n decryptor = Cipher(\n algorithms.AES(self._encryption_key), modes.CBC(iv), self._backend\n ).decryptor()\n plaintext_padded = decryptor.update(ciphertext)\n try:\n plaintext_padded += decryptor.finalize()\n except ValueError:\n raise InvalidToken\n unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()\n\n unpadded = unpadder.update(plaintext_padded)\n try:\n unpadded += unpadder.finalize()\n except ValueError:\n raise InvalidToken\n return unpadded\n\n\nclass MultiFernet(object):\n def __init__(self, fernets):\n fernets = list(fernets)\n if not fernets:\n raise ValueError(\n \"MultiFernet requires at least one Fernet instance\"\n )\n self._fernets = fernets\n\n def encrypt(self, msg):\n return self._fernets[0].encrypt(msg)\n\n def decrypt(self, msg, ttl=None):\n for f in self._fernets:\n try:\n return f.decrypt(msg, ttl)\n except 
InvalidToken:\n pass\n raise InvalidToken\n", "path": "src/cryptography/fernet.py"}]} | 2,246 | 151 |
gh_patches_debug_7377 | rasdani/github-patches | git_diff | tobymao__sqlglot-1951 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
error in optimizer.qualify with mixed identifier quotation.
**Fully reproducible code snippet**:
```
from sqlglot import parse_one
from sqlglot.dialects.oracle import Oracle
from sqlglot.lineage import lineage
from sqlglot.optimizer.qualify import qualify
parser = Oracle()
sql = """
with base as
(
select x.dummy as COL_1
from dual x
)
select b."COL_1"
from base b
"""
ast = parse_one(sql, read=parser)
qualified = qualify(
ast,
dialect=parser,
schema=None,
)
```
error:
```
Exception has occurred: OptimizeError
Unknown column: COL_1
File "C:\Users\itay5\Desktop\work\sql\main.py", line 20, in <module>
qualified = qualify(
^^^^^^^^
sqlglot.errors.OptimizeError: Unknown column: COL_1
```
I get the same error when trying to use identify=False as well; still no luck.
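
One workaround sketch, reusing the objects from the reproduction above: quote the alias in the CTE the same way it is referenced later, since Oracle resolves unquoted identifiers as upper case and that mismatch appears to be what breaks the column resolution here.

```python
sql_quoted = """
with base as
(
    select x.dummy as "COL_1"
    from dual x
)
select b."COL_1"
from base b
"""

qualified = qualify(parse_one(sql_quoted, read=parser), dialect=parser, schema=None)
```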
</issue>
<code>
[start of sqlglot/dialects/oracle.py]
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
7 from sqlglot.helper import seq_get
8 from sqlglot.tokens import TokenType
9
10
11 def _parse_xml_table(self: parser.Parser) -> exp.XMLTable:
12 this = self._parse_string()
13
14 passing = None
15 columns = None
16
17 if self._match_text_seq("PASSING"):
18 # The BY VALUE keywords are optional and are provided for semantic clarity
19 self._match_text_seq("BY", "VALUE")
20 passing = self._parse_csv(self._parse_column)
21
22 by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
23
24 if self._match_text_seq("COLUMNS"):
25 columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))
26
27 return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)
28
29
30 class Oracle(Dialect):
31 ALIAS_POST_TABLESAMPLE = True
32
33 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
34 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
35 TIME_MAPPING = {
36 "AM": "%p", # Meridian indicator with or without periods
37 "A.M.": "%p", # Meridian indicator with or without periods
38 "PM": "%p", # Meridian indicator with or without periods
39 "P.M.": "%p", # Meridian indicator with or without periods
40 "D": "%u", # Day of week (1-7)
41 "DAY": "%A", # name of day
42 "DD": "%d", # day of month (1-31)
43 "DDD": "%j", # day of year (1-366)
44 "DY": "%a", # abbreviated name of day
45 "HH": "%I", # Hour of day (1-12)
46 "HH12": "%I", # alias for HH
47 "HH24": "%H", # Hour of day (0-23)
48 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
49 "MI": "%M", # Minute (0-59)
50 "MM": "%m", # Month (01-12; January = 01)
51 "MON": "%b", # Abbreviated name of month
52 "MONTH": "%B", # Name of month
53 "SS": "%S", # Second (0-59)
54 "WW": "%W", # Week of year (1-53)
55 "YY": "%y", # 15
56 "YYYY": "%Y", # 2015
57 }
58
59 class Parser(parser.Parser):
60 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
61
62 FUNCTIONS = {
63 **parser.Parser.FUNCTIONS,
64 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
65 }
66
67 FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
68 **parser.Parser.FUNCTION_PARSERS,
69 "XMLTABLE": _parse_xml_table,
70 }
71
72 TYPE_LITERAL_PARSERS = {
73 exp.DataType.Type.DATE: lambda self, this, _: self.expression(
74 exp.DateStrToDate, this=this
75 )
76 }
77
78 def _parse_column(self) -> t.Optional[exp.Expression]:
79 column = super()._parse_column()
80 if column:
81 column.set("join_mark", self._match(TokenType.JOIN_MARKER))
82 return column
83
84 def _parse_hint(self) -> t.Optional[exp.Hint]:
85 if self._match(TokenType.HINT):
86 start = self._curr
87 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
88 self._advance()
89
90 if not self._curr:
91 self.raise_error("Expected */ after HINT")
92
93 end = self._tokens[self._index - 3]
94 return exp.Hint(expressions=[self._find_sql(start, end)])
95
96 return None
97
98 class Generator(generator.Generator):
99 LOCKING_READS_SUPPORTED = True
100 JOIN_HINTS = False
101 TABLE_HINTS = False
102 COLUMN_JOIN_MARKS_SUPPORTED = True
103
104 LIMIT_FETCH = "FETCH"
105
106 TYPE_MAPPING = {
107 **generator.Generator.TYPE_MAPPING,
108 exp.DataType.Type.TINYINT: "NUMBER",
109 exp.DataType.Type.SMALLINT: "NUMBER",
110 exp.DataType.Type.INT: "NUMBER",
111 exp.DataType.Type.BIGINT: "NUMBER",
112 exp.DataType.Type.DECIMAL: "NUMBER",
113 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
114 exp.DataType.Type.VARCHAR: "VARCHAR2",
115 exp.DataType.Type.NVARCHAR: "NVARCHAR2",
116 exp.DataType.Type.NCHAR: "NCHAR",
117 exp.DataType.Type.TEXT: "CLOB",
118 exp.DataType.Type.BINARY: "BLOB",
119 exp.DataType.Type.VARBINARY: "BLOB",
120 }
121
122 TRANSFORMS = {
123 **generator.Generator.TRANSFORMS,
124 exp.DateStrToDate: lambda self, e: self.func(
125 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
126 ),
127 exp.Group: transforms.preprocess([transforms.unalias_group]),
128 exp.ILike: no_ilike_sql,
129 exp.Coalesce: rename_func("NVL"),
130 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
131 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
132 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
133 exp.Substring: rename_func("SUBSTR"),
134 exp.Table: lambda self, e: self.table_sql(e, sep=" "),
135 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
136 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
137 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
138 exp.Trim: trim_sql,
139 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
140 }
141
142 PROPERTIES_LOCATION = {
143 **generator.Generator.PROPERTIES_LOCATION,
144 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
145 }
146
147 def offset_sql(self, expression: exp.Offset) -> str:
148 return f"{super().offset_sql(expression)} ROWS"
149
150 def xmltable_sql(self, expression: exp.XMLTable) -> str:
151 this = self.sql(expression, "this")
152 passing = self.expressions(expression, key="passing")
153 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
154 columns = self.expressions(expression, key="columns")
155 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
156 by_ref = (
157 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
158 )
159 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
160
161 class Tokenizer(tokens.Tokenizer):
162 VAR_SINGLE_TOKENS = {"@"}
163
164 KEYWORDS = {
165 **tokens.Tokenizer.KEYWORDS,
166 "(+)": TokenType.JOIN_MARKER,
167 "BINARY_DOUBLE": TokenType.DOUBLE,
168 "BINARY_FLOAT": TokenType.FLOAT,
169 "COLUMNS": TokenType.COLUMN,
170 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
171 "MINUS": TokenType.EXCEPT,
172 "NVARCHAR2": TokenType.NVARCHAR,
173 "SAMPLE": TokenType.TABLE_SAMPLE,
174 "START": TokenType.BEGIN,
175 "TOP": TokenType.TOP,
176 "VARCHAR2": TokenType.VARCHAR,
177 }
178
[end of sqlglot/dialects/oracle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -30,6 +30,9 @@
class Oracle(Dialect):
ALIAS_POST_TABLESAMPLE = True
+ # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
+ RESOLVES_IDENTIFIERS_AS_UPPERCASE = True
+
# https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
TIME_MAPPING = {
| {"golden_diff": "diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py\n--- a/sqlglot/dialects/oracle.py\n+++ b/sqlglot/dialects/oracle.py\n@@ -30,6 +30,9 @@\n class Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n \n+ # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm\n+ RESOLVES_IDENTIFIERS_AS_UPPERCASE = True\n+\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n", "issue": "error in optimizer.qualify with mixed identifier quotation.\n**Fully reproducible code snippet**:\r\n```\r\nfrom sqlglot import parse_one\r\nfrom sqlglot.dialects.oracle import Oracle\r\nfrom sqlglot.lineage import lineage\r\nfrom sqlglot.optimizer.qualify import qualify\r\n\r\nparser = Oracle()\r\n\r\nsql = \"\"\"\r\nwith base as\r\n(\r\n\r\n select x.dummy as COL_1\r\n from dual x\r\n)\r\nselect b.\"COL_1\"\r\nfrom base b\r\n\"\"\"\r\n\r\nast = parse_one(sql, read=parser)\r\nqualified = qualify(\r\n ast,\r\n dialect=parser,\r\n schema=None,\r\n )\r\n\r\n```\r\nerror:\r\n```\r\nException has occurred: OptimizeError\r\nUnknown column: COL_1\r\n File \"C:\\Users\\itay5\\Desktop\\work\\sql\\main.py\", line 20, in <module>\r\n qualified = qualify(\r\n ^^^^^^^^\r\nsqlglot.errors.OptimizeError: Unknown column: COL_1\r\n```\r\nI get the same error while trying to use identify=False, and still no luck.\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self: parser.Parser) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))\n\n return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)\n\n\nclass Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # 
Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Hint]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n COLUMN_JOIN_MARKS_SUPPORTED = True\n\n LIMIT_FETCH = \"FETCH\"\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.NCHAR: \"NCHAR\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.ILike: no_ilike_sql,\n exp.Coalesce: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n 
f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"BINARY_DOUBLE\": TokenType.DOUBLE,\n \"BINARY_FLOAT\": TokenType.FLOAT,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}]} | 3,036 | 187 |
gh_patches_debug_35745 | rasdani/github-patches | git_diff | uclapi__uclapi-977 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Webhooks
They haven't been delivered for a _long_ time now, so we finally need to dig deep and see what is going wrong. The room data is definitely up to date, however.
</issue>
<code>
[start of backend/uclapi/roombookings/management/commands/trigger_webhooks.py]
1 from django.core.management.base import BaseCommand
2 from roombookings.models import Lock, BookingA, BookingB
3 from roombookings.helpers import _serialize_bookings
4 from dashboard.models import Webhook, WebhookTriggerHistory
5 from datetime import datetime
6 from deepdiff import DeepDiff
7 import grequests
8 from django.utils import timezone
9
10
11 class Command(BaseCommand):
12
13 help = 'Diff roombooking result sets and notify relevant webhooks'
14
15 def handle(self, *args, **options):
16 self.stdout.write("Triggering webhooks")
17
18 # currently locked table is the old one, more recent one is not locked
19 lock = Lock.objects.all()[0] # there is only ever one lock
20
21 if lock.bookingA:
22 old_booking_table = BookingA
23 new_booking_table = BookingB
24 else:
25 old_booking_table = BookingB
26 new_booking_table = BookingA
27
28 now = datetime.now()
29
30 old_bookings = _serialize_bookings(
31 old_booking_table.objects.filter(
32 startdatetime__gt=now
33 )
34 )
35 new_bookings = _serialize_bookings(
36 new_booking_table.objects.filter(
37 startdatetime__gt=now
38 )
39 )
40
41 ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)
42
43 webhooks = Webhook.objects.all()
44 # assumption: list of webhooks will be longer than ddiff
45
46 num_bookings_added = 0
47 num_bookings_removed = 0
48 if "iterable_item_added" in ddiff:
49 num_bookings_added = len(
50 ddiff["iterable_item_added"].values()
51 )
52
53 if "iterable_item_removed" in ddiff:
54 num_bookings_removed = len(
55 ddiff["iterable_item_removed"].values()
56 )
57
58 self.stdout.write(
59 "{} bookings added\n{} bookings removed.".format(
60 num_bookings_added,
61 num_bookings_removed
62 )
63 )
64
65 def webhook_map(webhook):
66 def webhook_filter(booking):
67 return (
68 (
69 webhook.siteid == '' or
70 booking["siteid"] == webhook.siteid
71 ) and
72 (
73 webhook.roomid == '' or
74 booking["roomid"] == webhook.roomid
75 ) and
76 (
77 webhook.contact == '' or
78 # mimick SQL 'like'
79 webhook.contact in str(booking["contact"])
80 )
81 )
82 output = {
83 "webhook_in_db": webhook,
84 "url": webhook.url,
85 "verification_secret": webhook.verification_secret
86 }
87 if "iterable_item_added" in ddiff:
88 bookings_added = list(filter(
89 webhook_filter, ddiff["iterable_item_added"].values()
90 ))
91 if bookings_added != []:
92 output["bookings_added"] = bookings_added
93 if "iterable_item_removed" in ddiff:
94 bookings_removed = list(filter(
95 webhook_filter, ddiff["iterable_item_removed"].values()
96 ))
97 if bookings_removed != []:
98 output["bookings_removed"] = bookings_removed
99
100 return output
101
102 webhooks_to_enact = list(map(webhook_map, webhooks))
103
104 unsent_requests = []
105 for idx, webhook in enumerate(webhooks_to_enact):
106 payload = {
107 "service": "roombookings",
108 "name": "bookings_changed",
109 "verification_secret": webhook["verification_secret"],
110 "content": {}
111 }
112
113 if "bookings_added" in webhook:
114 payload["content"]["bookings_added"] = (
115 webhook["bookings_added"]
116 )
117 if "bookings_removed" in webhook:
118 payload["content"]["bookings_removed"] = (
119 webhook["bookings_removed"]
120 )
121
122 webhooks_to_enact[idx]["payload"] = payload
123
124 if payload["content"] != {}:
125 unsent_requests.append(
126 grequests.post(
127 webhook["url"], json=payload, headers={
128 "User-Agent": "uclapi-bot/1"
129 }
130 )
131 )
132 self.stdout.write(
133 "Triggering {} webhooks.".format(len(unsent_requests))
134 )
135 grequests.map(unsent_requests)
136
137 for webhook in webhooks_to_enact:
138 if webhook["payload"]["content"] != {}:
139 webhook_in_db = webhook["webhook_in_db"]
140 webhook_in_db.last_fired = timezone.now()
141 webhook_in_db.save()
142
143 new_webhook_history_entry = WebhookTriggerHistory(
144 webhook=webhook_in_db,
145 payload=webhook["payload"]
146 )
147 new_webhook_history_entry.save()
148
149 self.stdout.write("Webhooks triggered.")
150
[end of backend/uclapi/roombookings/management/commands/trigger_webhooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py
@@ -4,16 +4,25 @@
from dashboard.models import Webhook, WebhookTriggerHistory
from datetime import datetime
from deepdiff import DeepDiff
-import grequests
from django.utils import timezone
+from requests_futures.sessions import FuturesSession
class Command(BaseCommand):
help = 'Diff roombooking result sets and notify relevant webhooks'
+ def add_arguments(self, parser):
+ parser.add_argument(
+ '--debug',
+ action='store_true',
+ dest='debug',
+ help='Print webhook responses',
+ )
+
def handle(self, *args, **options):
self.stdout.write("Triggering webhooks")
+ session = FuturesSession()
# currently locked table is the old one, more recent one is not locked
lock = Lock.objects.all()[0] # there is only ever one lock
@@ -121,10 +130,11 @@
webhooks_to_enact[idx]["payload"] = payload
- if payload["content"] != {}:
+ if payload["content"] != {} and webhook["url"] != "":
unsent_requests.append(
- grequests.post(
- webhook["url"], json=payload, headers={
+ session.post(
+ webhook["url"], json=payload,
+ headers={
"User-Agent": "uclapi-bot/1"
}
)
@@ -132,7 +142,11 @@
self.stdout.write(
"Triggering {} webhooks.".format(len(unsent_requests))
)
- grequests.map(unsent_requests)
+ if("debug" in options):
+ for i in unsent_requests:
+ self.stdout.write(
+ 'response status {0}'.format(i.result().status_code)
+ )
for webhook in webhooks_to_enact:
if webhook["payload"]["content"] != {}:
| {"golden_diff": "diff --git a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n--- a/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n+++ b/backend/uclapi/roombookings/management/commands/trigger_webhooks.py\n@@ -4,16 +4,25 @@\n from dashboard.models import Webhook, WebhookTriggerHistory\n from datetime import datetime\n from deepdiff import DeepDiff\n-import grequests\n from django.utils import timezone\n+from requests_futures.sessions import FuturesSession\n \n \n class Command(BaseCommand):\n \n help = 'Diff roombooking result sets and notify relevant webhooks'\n \n+ def add_arguments(self, parser):\n+ parser.add_argument(\n+ '--debug',\n+ action='store_true',\n+ dest='debug',\n+ help='Print webhook responses',\n+ )\n+\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n+ session = FuturesSession()\n \n # currently locked table is the old one, more recent one is not locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n@@ -121,10 +130,11 @@\n \n webhooks_to_enact[idx][\"payload\"] = payload\n \n- if payload[\"content\"] != {}:\n+ if payload[\"content\"] != {} and webhook[\"url\"] != \"\":\n unsent_requests.append(\n- grequests.post(\n- webhook[\"url\"], json=payload, headers={\n+ session.post(\n+ webhook[\"url\"], json=payload,\n+ headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n@@ -132,7 +142,11 @@\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n- grequests.map(unsent_requests)\n+ if(\"debug\" in options):\n+ for i in unsent_requests:\n+ self.stdout.write(\n+ 'response status {0}'.format(i.result().status_code)\n+ )\n \n for webhook in webhooks_to_enact:\n if webhook[\"payload\"][\"content\"] != {}:\n", "issue": "Webhooks\nThey haven't been delivered for a _long_ time now, so we finally need to dig deep and see what is going wrong. 
The room data is definitely up to date, however.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom roombookings.models import Lock, BookingA, BookingB\nfrom roombookings.helpers import _serialize_bookings\nfrom dashboard.models import Webhook, WebhookTriggerHistory\nfrom datetime import datetime\nfrom deepdiff import DeepDiff\nimport grequests\nfrom django.utils import timezone\n\n\nclass Command(BaseCommand):\n\n help = 'Diff roombooking result sets and notify relevant webhooks'\n\n def handle(self, *args, **options):\n self.stdout.write(\"Triggering webhooks\")\n\n # currently locked table is the old one, more recent one is not locked\n lock = Lock.objects.all()[0] # there is only ever one lock\n\n if lock.bookingA:\n old_booking_table = BookingA\n new_booking_table = BookingB\n else:\n old_booking_table = BookingB\n new_booking_table = BookingA\n\n now = datetime.now()\n\n old_bookings = _serialize_bookings(\n old_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n new_bookings = _serialize_bookings(\n new_booking_table.objects.filter(\n startdatetime__gt=now\n )\n )\n\n ddiff = DeepDiff(old_bookings, new_bookings, ignore_order=True)\n\n webhooks = Webhook.objects.all()\n # assumption: list of webhooks will be longer than ddiff\n\n num_bookings_added = 0\n num_bookings_removed = 0\n if \"iterable_item_added\" in ddiff:\n num_bookings_added = len(\n ddiff[\"iterable_item_added\"].values()\n )\n\n if \"iterable_item_removed\" in ddiff:\n num_bookings_removed = len(\n ddiff[\"iterable_item_removed\"].values()\n )\n\n self.stdout.write(\n \"{} bookings added\\n{} bookings removed.\".format(\n num_bookings_added,\n num_bookings_removed\n )\n )\n\n def webhook_map(webhook):\n def webhook_filter(booking):\n return (\n (\n webhook.siteid == '' or\n booking[\"siteid\"] == webhook.siteid\n ) and\n (\n webhook.roomid == '' or\n booking[\"roomid\"] == webhook.roomid\n ) and\n (\n webhook.contact == '' or\n # mimick SQL 'like'\n webhook.contact in str(booking[\"contact\"])\n )\n )\n output = {\n \"webhook_in_db\": webhook,\n \"url\": webhook.url,\n \"verification_secret\": webhook.verification_secret\n }\n if \"iterable_item_added\" in ddiff:\n bookings_added = list(filter(\n webhook_filter, ddiff[\"iterable_item_added\"].values()\n ))\n if bookings_added != []:\n output[\"bookings_added\"] = bookings_added\n if \"iterable_item_removed\" in ddiff:\n bookings_removed = list(filter(\n webhook_filter, ddiff[\"iterable_item_removed\"].values()\n ))\n if bookings_removed != []:\n output[\"bookings_removed\"] = bookings_removed\n\n return output\n\n webhooks_to_enact = list(map(webhook_map, webhooks))\n\n unsent_requests = []\n for idx, webhook in enumerate(webhooks_to_enact):\n payload = {\n \"service\": \"roombookings\",\n \"name\": \"bookings_changed\",\n \"verification_secret\": webhook[\"verification_secret\"],\n \"content\": {}\n }\n\n if \"bookings_added\" in webhook:\n payload[\"content\"][\"bookings_added\"] = (\n webhook[\"bookings_added\"]\n )\n if \"bookings_removed\" in webhook:\n payload[\"content\"][\"bookings_removed\"] = (\n webhook[\"bookings_removed\"]\n )\n\n webhooks_to_enact[idx][\"payload\"] = payload\n\n if payload[\"content\"] != {}:\n unsent_requests.append(\n grequests.post(\n webhook[\"url\"], json=payload, headers={\n \"User-Agent\": \"uclapi-bot/1\"\n }\n )\n )\n self.stdout.write(\n \"Triggering {} webhooks.\".format(len(unsent_requests))\n )\n grequests.map(unsent_requests)\n\n for webhook in webhooks_to_enact:\n if 
webhook[\"payload\"][\"content\"] != {}:\n webhook_in_db = webhook[\"webhook_in_db\"]\n webhook_in_db.last_fired = timezone.now()\n webhook_in_db.save()\n\n new_webhook_history_entry = WebhookTriggerHistory(\n webhook=webhook_in_db,\n payload=webhook[\"payload\"]\n )\n new_webhook_history_entry.save()\n\n self.stdout.write(\"Webhooks triggered.\")\n", "path": "backend/uclapi/roombookings/management/commands/trigger_webhooks.py"}]} | 1,936 | 495 |
gh_patches_debug_27920 | rasdani/github-patches | git_diff | litestar-org__litestar-1581 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: `FileResponse` doesn't set `content-encoding` headers on gzip/brotli compressed files
### Description
When using `StaticFilesConfig` to serve compressed files (think `styles.css.gz`), Litestar will happily serve the file and even properly infer the mimetype, but won't set the correct `content-encoding` header required by the browser.
I will provide a PR with a test.
### URL to code causing the issue
_No response_
### MCVE
_No response_
### Steps to reproduce
_No response_
### Screenshots
```bash
""
```
### Logs
_No response_
### Litestar Version
1.51.10
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
</issue>
<code>
[start of starlite/response/file.py]
1 from email.utils import formatdate
2 from inspect import iscoroutine
3 from mimetypes import guess_type
4 from typing import (
5 TYPE_CHECKING,
6 Any,
7 AsyncGenerator,
8 Coroutine,
9 Dict,
10 Literal,
11 Optional,
12 Union,
13 cast,
14 )
15 from urllib.parse import quote
16 from zlib import adler32
17
18 from starlite.constants import ONE_MEGABYTE
19 from starlite.enums import MediaType
20 from starlite.exceptions import ImproperlyConfiguredException
21 from starlite.response.streaming import StreamingResponse
22 from starlite.status_codes import HTTP_200_OK
23 from starlite.utils.file import BaseLocalFileSystem, FileSystemAdapter
24
25 if TYPE_CHECKING:
26 from os import PathLike
27 from os import stat_result as stat_result_type
28
29 from anyio import Path
30
31 from starlite.datastructures import BackgroundTask, BackgroundTasks, ETag
32 from starlite.types import (
33 HTTPResponseBodyEvent,
34 PathType,
35 Receive,
36 ResponseCookies,
37 Send,
38 )
39 from starlite.types.file_types import FileInfo, FileSystemProtocol
40
41
42 async def async_file_iterator(
43 file_path: "PathType", chunk_size: int, adapter: "FileSystemAdapter"
44 ) -> AsyncGenerator[bytes, None]:
45 """Return an async that asynchronously reads a file and yields its chunks.
46
47 Args:
48 file_path: A path to a file.
49 chunk_size: The chunk file to use.
50 adapter: File system adapter class.
51 adapter: File system adapter class.
52
53 Returns:
54 An async generator.
55 """
56 async with await adapter.open(file_path) as file:
57 while chunk := await file.read(chunk_size):
58 yield chunk
59
60
61 def create_etag_for_file(path: "PathType", modified_time: float, file_size: int) -> str:
62 """Create an etag.
63
64 Notes:
65 - Function is derived from flask.
66
67 Returns:
68 An etag.
69 """
70 check = adler32(str(path).encode("utf-8")) & 0xFFFFFFFF
71 return f'"{modified_time}-{file_size}-{check}"'
72
73
74 class FileResponse(StreamingResponse):
75 """A response, streaming a file as response body."""
76
77 __slots__ = (
78 "chunk_size",
79 "content_disposition_type",
80 "etag",
81 "file_path",
82 "filename",
83 "adapter",
84 "file_info",
85 )
86
87 def __init__(
88 self,
89 path: Union[str, "PathLike", "Path"],
90 *,
91 background: Optional[Union["BackgroundTask", "BackgroundTasks"]] = None,
92 chunk_size: int = ONE_MEGABYTE,
93 content_disposition_type: Literal["attachment", "inline"] = "attachment",
94 cookies: Optional["ResponseCookies"] = None,
95 encoding: str = "utf-8",
96 etag: Optional["ETag"] = None,
97 file_system: Optional["FileSystemProtocol"] = None,
98 filename: Optional[str] = None,
99 file_info: Optional["FileInfo"] = None,
100 headers: Optional[Dict[str, Any]] = None,
101 is_head_response: bool = False,
102 media_type: Optional[Union[Literal[MediaType.TEXT], str]] = None,
103 stat_result: Optional["stat_result_type"] = None,
104 status_code: int = HTTP_200_OK,
105 ) -> None:
106 """Initialize ``FileResponse``
107
108 Notes:
109 - This class extends the :class:`StreamingResponse <starlite.response.StreamingResponse>` class.
110
111 Args:
112 path: A file path in one of the supported formats.
113 status_code: An HTTP status code.
114 media_type: A value for the response 'Content-Type' header. If not provided, the value will be either
115 derived from the filename if provided and supported by the stdlib, or will default to
116 'application/octet-stream'.
117 background: A :class:`BackgroundTask <starlite.datastructures.BackgroundTask>` instance or
118 :class:`BackgroundTasks <starlite.datastructures.BackgroundTasks>` to execute after the response is finished.
119 Defaults to None.
120 headers: A string keyed dictionary of response headers. Header keys are insensitive.
121 cookies: A list of :class:`Cookie <starlite.datastructures.Cookie>` instances to be set under the response 'Set-Cookie' header.
122 encoding: The encoding to be used for the response headers.
123 is_head_response: Whether the response should send only the headers ("head" request) or also the content.
124 filename: An optional filename to set in the header.
125 stat_result: An optional result of calling 'os.stat'. If not provided, this will be done by the response
126 constructor.
127 chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.
128 content_disposition_type: The type of the 'Content-Disposition'. Either ``inline`` or ``attachment``.
129 etag: An optional :class:`ETag <starlite.datastructures.ETag>` instance.
130 If not provided, an etag will be automatically generated.
131 file_system: An implementation of the :class:`FileSystemProtocol <starlite.types.FileSystemProtocol>`. If provided
132 it will be used to load the file.
133 file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.
134 """
135 if not media_type:
136 mimetype, _ = guess_type(filename) if filename else (None, None)
137 media_type = mimetype or "application/octet-stream"
138
139 self.chunk_size = chunk_size
140 self.content_disposition_type = content_disposition_type
141 self.etag = etag
142 self.file_path = path
143 self.filename = filename or ""
144 self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())
145
146 super().__init__(
147 content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),
148 status_code=status_code,
149 media_type=media_type,
150 background=background,
151 headers=headers,
152 cookies=cookies,
153 encoding=encoding,
154 is_head_response=is_head_response,
155 )
156
157 if file_info:
158 self.file_info: Union["FileInfo", "Coroutine[Any, Any, 'FileInfo']"] = file_info
159 elif stat_result:
160 self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)
161 else:
162 self.file_info = self.adapter.info(self.file_path)
163
164 @property
165 def content_disposition(self) -> str:
166 """Content disposition.
167
168 Returns:
169 A value for the 'Content-Disposition' header.
170 """
171 quoted_filename = quote(self.filename)
172 is_utf8 = quoted_filename == self.filename
173 if is_utf8:
174 return f'{self.content_disposition_type}; filename="{self.filename}"'
175 return f"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}"
176
177 @property
178 def content_length(self) -> int:
179 """Content length of the response if applicable.
180
181 Returns:
182 Returns the value of 'self.stat_result.st_size' to populate the 'Content-Length' header.
183 """
184 if isinstance(self.file_info, dict):
185 return self.file_info["size"]
186 return 0
187
188 async def send_body(self, send: "Send", receive: "Receive") -> None:
189 """Emit a stream of events correlating with the response body.
190
191 Args:
192 send: The ASGI send function.
193 receive: The ASGI receive function.
194
195 Returns:
196 None
197 """
198 if self.chunk_size < self.content_length:
199 await super().send_body(send=send, receive=receive)
200 return
201
202 async with await self.adapter.open(self.file_path) as file:
203 body_event: "HTTPResponseBodyEvent" = {
204 "type": "http.response.body",
205 "body": await file.read(),
206 "more_body": False,
207 }
208 await send(body_event)
209
210 async def start_response(self, send: "Send") -> None:
211 """Emit the start event of the response. This event includes the headers and status codes.
212
213 Args:
214 send: The ASGI send function.
215
216 Returns:
217 None
218 """
219 try:
220 fs_info = self.file_info = cast(
221 "FileInfo", (await self.file_info if iscoroutine(self.file_info) else self.file_info)
222 )
223 except FileNotFoundError as e:
224 raise ImproperlyConfiguredException(f"{self.file_path} does not exist") from e
225
226 if fs_info["type"] != "file":
227 raise ImproperlyConfiguredException(f"{self.file_path} is not a file")
228
229 self.set_header("last-modified", formatdate(fs_info["mtime"], usegmt=True))
230 self.set_header("content-disposition", self.content_disposition)
231 self.set_etag(
232 self.etag
233 or create_etag_for_file(path=self.file_path, modified_time=fs_info["mtime"], file_size=fs_info["size"])
234 )
235
236 await super().start_response(send=send)
237
[end of starlite/response/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlite/response/file.py b/starlite/response/file.py
--- a/starlite/response/file.py
+++ b/starlite/response/file.py
@@ -1,6 +1,6 @@
from email.utils import formatdate
from inspect import iscoroutine
-from mimetypes import guess_type
+from mimetypes import encodings_map, guess_type
from typing import (
TYPE_CHECKING,
Any,
@@ -38,6 +38,9 @@
)
from starlite.types.file_types import FileInfo, FileSystemProtocol
+# brotli not supported in 'mimetypes.encodings_map' until py 3.9.
+encodings_map[".br"] = "br"
+
async def async_file_iterator(
file_path: "PathType", chunk_size: int, adapter: "FileSystemAdapter"
@@ -133,8 +136,11 @@
file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.
"""
if not media_type:
- mimetype, _ = guess_type(filename) if filename else (None, None)
+ mimetype, content_encoding = guess_type(filename) if filename else (None, None)
media_type = mimetype or "application/octet-stream"
+ if content_encoding is not None:
+ headers = headers or {}
+ headers.update({"content-encoding": content_encoding})
self.chunk_size = chunk_size
self.content_disposition_type = content_disposition_type
| {"golden_diff": "diff --git a/starlite/response/file.py b/starlite/response/file.py\n--- a/starlite/response/file.py\n+++ b/starlite/response/file.py\n@@ -1,6 +1,6 @@\n from email.utils import formatdate\n from inspect import iscoroutine\n-from mimetypes import guess_type\n+from mimetypes import encodings_map, guess_type\n from typing import (\n TYPE_CHECKING,\n Any,\n@@ -38,6 +38,9 @@\n )\n from starlite.types.file_types import FileInfo, FileSystemProtocol\n \n+# brotli not supported in 'mimetypes.encodings_map' until py 3.9.\n+encodings_map[\".br\"] = \"br\"\n+\n \n async def async_file_iterator(\n file_path: \"PathType\", chunk_size: int, adapter: \"FileSystemAdapter\"\n@@ -133,8 +136,11 @@\n file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.\n \"\"\"\n if not media_type:\n- mimetype, _ = guess_type(filename) if filename else (None, None)\n+ mimetype, content_encoding = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n+ if content_encoding is not None:\n+ headers = headers or {}\n+ headers.update({\"content-encoding\": content_encoding})\n \n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n", "issue": "Bug: `FileResponse` doesn't set `content-encoding` headers on gzip/brotli compressed files\n### Description\n\nWhen using `StaticFilesConfig` to serve compressed files (think `styles.css.gz`), Litestar will happily serve the file and even properly infer the mimetype, but won't set the correct `content-encoding` header required by the browser.\r\n\r\nI will provide a PR with a test.\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n_No response_\n\n### Steps to reproduce\n\n_No response_\n\n### Screenshots\n\n```bash\n\"\"\n```\n\n\n### Logs\n\n_No response_\n\n### Litestar Version\n\n1.51.10\n\n### Platform\n\n- [X] Linux\n- [ ] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n", "before_files": [{"content": "from email.utils import formatdate\nfrom inspect import iscoroutine\nfrom mimetypes import guess_type\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncGenerator,\n Coroutine,\n Dict,\n Literal,\n Optional,\n Union,\n cast,\n)\nfrom urllib.parse import quote\nfrom zlib import adler32\n\nfrom starlite.constants import ONE_MEGABYTE\nfrom starlite.enums import MediaType\nfrom starlite.exceptions import ImproperlyConfiguredException\nfrom starlite.response.streaming import StreamingResponse\nfrom starlite.status_codes import HTTP_200_OK\nfrom starlite.utils.file import BaseLocalFileSystem, FileSystemAdapter\n\nif TYPE_CHECKING:\n from os import PathLike\n from os import stat_result as stat_result_type\n\n from anyio import Path\n\n from starlite.datastructures import BackgroundTask, BackgroundTasks, ETag\n from starlite.types import (\n HTTPResponseBodyEvent,\n PathType,\n Receive,\n ResponseCookies,\n Send,\n )\n from starlite.types.file_types import FileInfo, FileSystemProtocol\n\n\nasync def async_file_iterator(\n file_path: \"PathType\", chunk_size: int, adapter: \"FileSystemAdapter\"\n) -> AsyncGenerator[bytes, None]:\n \"\"\"Return an async that asynchronously reads a file and yields its chunks.\n\n Args:\n file_path: A path to a file.\n chunk_size: The chunk file to use.\n adapter: File system adapter class.\n adapter: File system adapter class.\n\n Returns:\n An async generator.\n \"\"\"\n async with await adapter.open(file_path) as file:\n while chunk := await 
file.read(chunk_size):\n yield chunk\n\n\ndef create_etag_for_file(path: \"PathType\", modified_time: float, file_size: int) -> str:\n \"\"\"Create an etag.\n\n Notes:\n - Function is derived from flask.\n\n Returns:\n An etag.\n \"\"\"\n check = adler32(str(path).encode(\"utf-8\")) & 0xFFFFFFFF\n return f'\"{modified_time}-{file_size}-{check}\"'\n\n\nclass FileResponse(StreamingResponse):\n \"\"\"A response, streaming a file as response body.\"\"\"\n\n __slots__ = (\n \"chunk_size\",\n \"content_disposition_type\",\n \"etag\",\n \"file_path\",\n \"filename\",\n \"adapter\",\n \"file_info\",\n )\n\n def __init__(\n self,\n path: Union[str, \"PathLike\", \"Path\"],\n *,\n background: Optional[Union[\"BackgroundTask\", \"BackgroundTasks\"]] = None,\n chunk_size: int = ONE_MEGABYTE,\n content_disposition_type: Literal[\"attachment\", \"inline\"] = \"attachment\",\n cookies: Optional[\"ResponseCookies\"] = None,\n encoding: str = \"utf-8\",\n etag: Optional[\"ETag\"] = None,\n file_system: Optional[\"FileSystemProtocol\"] = None,\n filename: Optional[str] = None,\n file_info: Optional[\"FileInfo\"] = None,\n headers: Optional[Dict[str, Any]] = None,\n is_head_response: bool = False,\n media_type: Optional[Union[Literal[MediaType.TEXT], str]] = None,\n stat_result: Optional[\"stat_result_type\"] = None,\n status_code: int = HTTP_200_OK,\n ) -> None:\n \"\"\"Initialize ``FileResponse``\n\n Notes:\n - This class extends the :class:`StreamingResponse <starlite.response.StreamingResponse>` class.\n\n Args:\n path: A file path in one of the supported formats.\n status_code: An HTTP status code.\n media_type: A value for the response 'Content-Type' header. If not provided, the value will be either\n derived from the filename if provided and supported by the stdlib, or will default to\n 'application/octet-stream'.\n background: A :class:`BackgroundTask <starlite.datastructures.BackgroundTask>` instance or\n :class:`BackgroundTasks <starlite.datastructures.BackgroundTasks>` to execute after the response is finished.\n Defaults to None.\n headers: A string keyed dictionary of response headers. Header keys are insensitive.\n cookies: A list of :class:`Cookie <starlite.datastructures.Cookie>` instances to be set under the response 'Set-Cookie' header.\n encoding: The encoding to be used for the response headers.\n is_head_response: Whether the response should send only the headers (\"head\" request) or also the content.\n filename: An optional filename to set in the header.\n stat_result: An optional result of calling 'os.stat'. If not provided, this will be done by the response\n constructor.\n chunk_size: The chunk sizes to use when streaming the file. Defaults to 1MB.\n content_disposition_type: The type of the 'Content-Disposition'. Either ``inline`` or ``attachment``.\n etag: An optional :class:`ETag <starlite.datastructures.ETag>` instance.\n If not provided, an etag will be automatically generated.\n file_system: An implementation of the :class:`FileSystemProtocol <starlite.types.FileSystemProtocol>`. 
If provided\n it will be used to load the file.\n file_info: The output of calling ``file_system.info(..)``, equivalent to providing a ``stat_result``.\n \"\"\"\n if not media_type:\n mimetype, _ = guess_type(filename) if filename else (None, None)\n media_type = mimetype or \"application/octet-stream\"\n\n self.chunk_size = chunk_size\n self.content_disposition_type = content_disposition_type\n self.etag = etag\n self.file_path = path\n self.filename = filename or \"\"\n self.adapter = FileSystemAdapter(file_system or BaseLocalFileSystem())\n\n super().__init__(\n content=async_file_iterator(file_path=path, chunk_size=chunk_size, adapter=self.adapter),\n status_code=status_code,\n media_type=media_type,\n background=background,\n headers=headers,\n cookies=cookies,\n encoding=encoding,\n is_head_response=is_head_response,\n )\n\n if file_info:\n self.file_info: Union[\"FileInfo\", \"Coroutine[Any, Any, 'FileInfo']\"] = file_info\n elif stat_result:\n self.file_info = self.adapter.parse_stat_result(result=stat_result, path=path)\n else:\n self.file_info = self.adapter.info(self.file_path)\n\n @property\n def content_disposition(self) -> str:\n \"\"\"Content disposition.\n\n Returns:\n A value for the 'Content-Disposition' header.\n \"\"\"\n quoted_filename = quote(self.filename)\n is_utf8 = quoted_filename == self.filename\n if is_utf8:\n return f'{self.content_disposition_type}; filename=\"{self.filename}\"'\n return f\"{self.content_disposition_type}; filename*=utf-8''{quoted_filename}\"\n\n @property\n def content_length(self) -> int:\n \"\"\"Content length of the response if applicable.\n\n Returns:\n Returns the value of 'self.stat_result.st_size' to populate the 'Content-Length' header.\n \"\"\"\n if isinstance(self.file_info, dict):\n return self.file_info[\"size\"]\n return 0\n\n async def send_body(self, send: \"Send\", receive: \"Receive\") -> None:\n \"\"\"Emit a stream of events correlating with the response body.\n\n Args:\n send: The ASGI send function.\n receive: The ASGI receive function.\n\n Returns:\n None\n \"\"\"\n if self.chunk_size < self.content_length:\n await super().send_body(send=send, receive=receive)\n return\n\n async with await self.adapter.open(self.file_path) as file:\n body_event: \"HTTPResponseBodyEvent\" = {\n \"type\": \"http.response.body\",\n \"body\": await file.read(),\n \"more_body\": False,\n }\n await send(body_event)\n\n async def start_response(self, send: \"Send\") -> None:\n \"\"\"Emit the start event of the response. This event includes the headers and status codes.\n\n Args:\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n fs_info = self.file_info = cast(\n \"FileInfo\", (await self.file_info if iscoroutine(self.file_info) else self.file_info)\n )\n except FileNotFoundError as e:\n raise ImproperlyConfiguredException(f\"{self.file_path} does not exist\") from e\n\n if fs_info[\"type\"] != \"file\":\n raise ImproperlyConfiguredException(f\"{self.file_path} is not a file\")\n\n self.set_header(\"last-modified\", formatdate(fs_info[\"mtime\"], usegmt=True))\n self.set_header(\"content-disposition\", self.content_disposition)\n self.set_etag(\n self.etag\n or create_etag_for_file(path=self.file_path, modified_time=fs_info[\"mtime\"], file_size=fs_info[\"size\"])\n )\n\n await super().start_response(send=send)\n", "path": "starlite/response/file.py"}]} | 3,239 | 318 |
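Note on the golden diff above: the fix leans on the fact that `mimetypes.guess_type` already reports the compression encoding alongside the media type. A minimal sketch of that stdlib behaviour (the file names are made up; the `.br` registration mirrors what the patch adds for Python < 3.9):

```python
from mimetypes import encodings_map, guess_type

# brotli is missing from encodings_map before Python 3.9, so register it
encodings_map[".br"] = "br"

for name in ("styles.css.gz", "styles.css.br", "styles.css"):
    media_type, content_encoding = guess_type(name)
    print(name, media_type, content_encoding)

# styles.css.gz text/css gzip
# styles.css.br text/css br
# styles.css text/css None
```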
gh_patches_debug_51896 | rasdani/github-patches | git_diff | geopandas__geopandas-1566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: the clip function doesn't dynamically use the geometry column name
Hi, today I noticed that the clip function didn't work for me, but then I found out what the problem was.
Import geodataframes with geometry called "WKT":
```
field_map = pd.read_csv('./field_map.csv')
field_map['WKT'] = field_map['WKT'].apply(wkt.loads)
field_map = gpd.GeoDataFrame(field_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})
print(field_map.columns)
boundary_map = pd.read_csv('./boundary_map.csv')
boundary_map['WKT'] = boundary_map['WKT'].apply(wkt.loads)
boundary_map = gpd.GeoDataFrame(boundary_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})
print(boundary_map.columns)
> Index(['Unnamed: 0', 'IDX', 'Value', 'WKT', 'WKTTypeID', 'IDXmaster'], dtype='object')
> Index(['Unnamed: 0', 'WKT'], dtype='object')
```
Clip the map and plot to validate:
```
clip_map = gpd.clip(field_map, boundary_map)
fig, ax = plt.subplots(figsize=(10,10))
clip_map.plot(ax=ax)
boundary_map.geometry.boundary.plot(ax=ax, color='red')
```

It seems that the clip has not worked, but if we look at the clip_map columns we see both "WKT" and "geometry"

**SOLUTION:**
This worked for me: renaming the geometry column to "geometry"
```
field_map = field_map.rename_geometry('geometry')
boundary_map = boundary_map.rename_geometry('geometry')
clip_map = gpd.clip(field_map, boundary_map)
fig, ax = plt.subplots(figsize=(10,10))
clip_map.plot(ax=ax)
boundary_map.geometry.boundary.plot(ax=ax, color='red')
```

The clip function now works correctly
Regards
</issue>
<code>
[start of geopandas/tools/clip.py]
1 """
2 geopandas.clip
3 ==============
4
5 A module to clip vector data using GeoPandas.
6
7 """
8 import warnings
9
10 import numpy as np
11 import pandas as pd
12
13 from shapely.geometry import Polygon, MultiPolygon
14
15 from geopandas import GeoDataFrame, GeoSeries
16 from geopandas.array import _check_crs, _crs_mismatch_warn
17
18
19 def _clip_points(gdf, poly):
20 """Clip point geometry to the polygon extent.
21
22 Clip an input point GeoDataFrame to the polygon extent of the poly
23 parameter. Points that intersect the poly geometry are extracted with
24 associated attributes and returned.
25
26 Parameters
27 ----------
28 gdf : GeoDataFrame, GeoSeries
29 Composed of point geometry that will be clipped to the poly.
30
31 poly : (Multi)Polygon
32 Reference geometry used to spatially clip the data.
33
34 Returns
35 -------
36 GeoDataFrame
37 The returned GeoDataFrame is a subset of gdf that intersects
38 with poly.
39 """
40 return gdf.iloc[gdf.sindex.query(poly, predicate="intersects")]
41
42
43 def _clip_line_poly(gdf, poly):
44 """Clip line and polygon geometry to the polygon extent.
45
46 Clip an input line or polygon to the polygon extent of the poly
47 parameter. Parts of Lines or Polygons that intersect the poly geometry are
48 extracted with associated attributes and returned.
49
50 Parameters
51 ----------
52 gdf : GeoDataFrame, GeoSeries
53 Line or polygon geometry that is clipped to poly.
54
55 poly : (Multi)Polygon
56 Reference polygon for clipping.
57
58 Returns
59 -------
60 GeoDataFrame
61 The returned GeoDataFrame is a clipped subset of gdf
62 that intersects with poly.
63 """
64 gdf_sub = gdf.iloc[gdf.sindex.query(poly, predicate="intersects")]
65
66 # Clip the data with the polygon
67 if isinstance(gdf_sub, GeoDataFrame):
68 clipped = gdf_sub.copy()
69 clipped["geometry"] = gdf_sub.intersection(poly)
70 else:
71 # GeoSeries
72 clipped = gdf_sub.intersection(poly)
73
74 return clipped
75
76
77 def clip(gdf, mask, keep_geom_type=False):
78 """Clip points, lines, or polygon geometries to the mask extent.
79
80 Both layers must be in the same Coordinate Reference System (CRS).
81 The `gdf` will be clipped to the full extent of the clip object.
82
83 If there are multiple polygons in mask, data from `gdf` will be
84 clipped to the total boundary of all polygons in mask.
85
86 Parameters
87 ----------
88 gdf : GeoDataFrame or GeoSeries
89 Vector layer (point, line, polygon) to be clipped to mask.
90 mask : GeoDataFrame, GeoSeries, (Multi)Polygon
91 Polygon vector layer used to clip `gdf`.
92 The mask's geometry is dissolved into one geometric feature
93 and intersected with `gdf`.
94 keep_geom_type : boolean, default False
95 If True, return only geometries of original type in case of intersection
96 resulting in multiple geometry types or GeometryCollections.
97 If False, return all resulting geometries (potentially mixed-types).
98
99 Returns
100 -------
101 GeoDataFrame or GeoSeries
102 Vector data (points, lines, polygons) from `gdf` clipped to
103 polygon boundary from mask.
104
105 Examples
106 --------
107 Clip points (global cities) with a polygon (the South American continent):
108
109 >>> import geopandas
110 >>> path =
111 >>> world = geopandas.read_file(
112 ... geopandas.datasets.get_path('naturalearth_lowres'))
113 >>> south_america = world[world['continent'] == "South America"]
114 >>> capitals = geopandas.read_file(
115 ... geopandas.datasets.get_path('naturalearth_cities'))
116 >>> capitals.shape
117 (202, 2)
118 >>> sa_capitals = geopandas.clip(capitals, south_america)
119 >>> sa_capitals.shape
120 (12, 2)
121 """
122 if not isinstance(gdf, (GeoDataFrame, GeoSeries)):
123 raise TypeError(
124 "'gdf' should be GeoDataFrame or GeoSeries, got {}".format(type(gdf))
125 )
126
127 if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):
128 raise TypeError(
129 "'mask' should be GeoDataFrame, GeoSeries or"
130 "(Multi)Polygon, got {}".format(type(gdf))
131 )
132
133 if isinstance(mask, (GeoDataFrame, GeoSeries)):
134 if not _check_crs(gdf, mask):
135 _crs_mismatch_warn(gdf, mask, stacklevel=3)
136
137 if isinstance(mask, (GeoDataFrame, GeoSeries)):
138 box_mask = mask.total_bounds
139 else:
140 box_mask = mask.bounds
141 box_gdf = gdf.total_bounds
142 if not (
143 ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))
144 and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))
145 ):
146 return gdf.iloc[:0]
147
148 if isinstance(mask, (GeoDataFrame, GeoSeries)):
149 poly = mask.geometry.unary_union
150 else:
151 poly = mask
152
153 geom_types = gdf.geometry.type
154 poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
155 line_idx = np.asarray(
156 (geom_types == "LineString")
157 | (geom_types == "LinearRing")
158 | (geom_types == "MultiLineString")
159 )
160 point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
161 geomcoll_idx = np.asarray((geom_types == "GeometryCollection"))
162
163 if point_idx.any():
164 point_gdf = _clip_points(gdf[point_idx], poly)
165 else:
166 point_gdf = None
167
168 if poly_idx.any():
169 poly_gdf = _clip_line_poly(gdf[poly_idx], poly)
170 else:
171 poly_gdf = None
172
173 if line_idx.any():
174 line_gdf = _clip_line_poly(gdf[line_idx], poly)
175 else:
176 line_gdf = None
177
178 if geomcoll_idx.any():
179 geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)
180 else:
181 geomcoll_gdf = None
182
183 order = pd.Series(range(len(gdf)), index=gdf.index)
184 concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])
185
186 if keep_geom_type:
187 geomcoll_concat = (concat.geom_type == "GeometryCollection").any()
188 geomcoll_orig = geomcoll_idx.any()
189
190 new_collection = geomcoll_concat and not geomcoll_orig
191
192 if geomcoll_orig:
193 warnings.warn(
194 "keep_geom_type can not be called on a "
195 "GeoDataFrame with GeometryCollection."
196 )
197 else:
198 polys = ["Polygon", "MultiPolygon"]
199 lines = ["LineString", "MultiLineString", "LinearRing"]
200 points = ["Point", "MultiPoint"]
201
202 # Check that the gdf for multiple geom types (points, lines and/or polys)
203 orig_types_total = sum(
204 [
205 gdf.geom_type.isin(polys).any(),
206 gdf.geom_type.isin(lines).any(),
207 gdf.geom_type.isin(points).any(),
208 ]
209 )
210
211 # Check how many geometry types are in the clipped GeoDataFrame
212 clip_types_total = sum(
213 [
214 concat.geom_type.isin(polys).any(),
215 concat.geom_type.isin(lines).any(),
216 concat.geom_type.isin(points).any(),
217 ]
218 )
219
220 # Check there aren't any new geom types in the clipped GeoDataFrame
221 more_types = orig_types_total < clip_types_total
222
223 if orig_types_total > 1:
224 warnings.warn(
225 "keep_geom_type can not be called on a mixed type GeoDataFrame."
226 )
227 elif new_collection or more_types:
228 orig_type = gdf.geom_type.iloc[0]
229 if new_collection:
230 concat = concat.explode()
231 if orig_type in polys:
232 concat = concat.loc[concat.geom_type.isin(polys)]
233 elif orig_type in lines:
234 concat = concat.loc[concat.geom_type.isin(lines)]
235
236 # Return empty GeoDataFrame or GeoSeries if no shapes remain
237 if len(concat) == 0:
238 return gdf.iloc[:0]
239
240 # Preserve the original order of the input
241 if isinstance(concat, GeoDataFrame):
242 concat["_order"] = order
243 return concat.sort_values(by="_order").drop(columns="_order")
244 else:
245 concat = GeoDataFrame(geometry=concat)
246 concat["_order"] = order
247 return concat.sort_values(by="_order").geometry
248
[end of geopandas/tools/clip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/tools/clip.py b/geopandas/tools/clip.py
--- a/geopandas/tools/clip.py
+++ b/geopandas/tools/clip.py
@@ -66,7 +66,7 @@
# Clip the data with the polygon
if isinstance(gdf_sub, GeoDataFrame):
clipped = gdf_sub.copy()
- clipped["geometry"] = gdf_sub.intersection(poly)
+ clipped[gdf.geometry.name] = gdf_sub.intersection(poly)
else:
# GeoSeries
clipped = gdf_sub.intersection(poly)
| {"golden_diff": "diff --git a/geopandas/tools/clip.py b/geopandas/tools/clip.py\n--- a/geopandas/tools/clip.py\n+++ b/geopandas/tools/clip.py\n@@ -66,7 +66,7 @@\n # Clip the data with the polygon\n if isinstance(gdf_sub, GeoDataFrame):\n clipped = gdf_sub.copy()\n- clipped[\"geometry\"] = gdf_sub.intersection(poly)\n+ clipped[gdf.geometry.name] = gdf_sub.intersection(poly)\n else:\n # GeoSeries\n clipped = gdf_sub.intersection(poly)\n", "issue": "BUG: the clip function don't take dynamically the geometry column name \nHi, today i noticed that the clip function didn't work for me but then i found which was the problem.\r\n\r\nImport geodataframes with geometry called \"WKT\":\r\n```\r\nfield_map = pd.read_csv('./field_map.csv')\r\nfield_map['WKT'] = field_map['WKT'].apply(wkt.loads)\r\nfield_map = gpd.GeoDataFrame(field_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})\r\n\r\nprint(field_map.columns)\r\n\r\nboundary_map = pd.read_csv('./boundary_map.csv')\r\nboundary_map['WKT'] = boundary_map['WKT'].apply(wkt.loads)\r\nboundary_map = gpd.GeoDataFrame(boundary_map, geometry = 'WKT', crs = {'init': 'epsg:4326'})\r\n\r\nprint(boundary_map.columns)\r\n\r\n> Index(['Unnamed: 0', 'IDX', 'Value', 'WKT', 'WKTTypeID', 'IDXmaster'], dtype='object')\r\n> Index(['Unnamed: 0', 'WKT'], dtype='object')\r\n\r\n```\r\nClip the map and plot to validate:\r\n```\r\nclip_map = gpd.clip(field_map, boundary_map)\r\n\r\nfig, ax = plt.subplots(figsize=(10,10))\r\nclip_map.plot(ax=ax)\r\nboundary_map.geometry.boundary.plot(ax=ax, color='red')\r\n```\r\n\r\n\r\nit seems that the clip has not worked but if we look at the of clip_map columns we see \"WKT\" and \"geometry\"\r\n\r\n\r\n\r\n\r\n**SOLUTION:**\r\nThis worked for me, renaming the geometry column as \"geometry\"\r\n\r\n```\r\nfield_map = field_map.rename_geometry('geometry')\r\nboundary_map = boundary_map.rename_geometry('geometry')\r\n\r\nclip_map = gpd.clip(field_map, boundary_map)\r\n\r\nfig, ax = plt.subplots(figsize=(10,10))\r\nclip_map.plot(ax=ax)\r\nboundary_map.geometry.boundary.plot(ax=ax, color='red')\r\n```\r\n\r\n\r\n\r\nThe clip function now work correctly\r\nRegards\r\n\r\n\n", "before_files": [{"content": "\"\"\"\ngeopandas.clip\n==============\n\nA module to clip vector data using GeoPandas.\n\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely.geometry import Polygon, MultiPolygon\n\nfrom geopandas import GeoDataFrame, GeoSeries\nfrom geopandas.array import _check_crs, _crs_mismatch_warn\n\n\ndef _clip_points(gdf, poly):\n \"\"\"Clip point geometry to the polygon extent.\n\n Clip an input point GeoDataFrame to the polygon extent of the poly\n parameter. Points that intersect the poly geometry are extracted with\n associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Composed of point geometry that will be clipped to the poly.\n\n poly : (Multi)Polygon\n Reference geometry used to spatially clip the data.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a subset of gdf that intersects\n with poly.\n \"\"\"\n return gdf.iloc[gdf.sindex.query(poly, predicate=\"intersects\")]\n\n\ndef _clip_line_poly(gdf, poly):\n \"\"\"Clip line and polygon geometry to the polygon extent.\n\n Clip an input line or polygon to the polygon extent of the poly\n parameter. 
Parts of Lines or Polygons that intersect the poly geometry are\n extracted with associated attributes and returned.\n\n Parameters\n ----------\n gdf : GeoDataFrame, GeoSeries\n Line or polygon geometry that is clipped to poly.\n\n poly : (Multi)Polygon\n Reference polygon for clipping.\n\n Returns\n -------\n GeoDataFrame\n The returned GeoDataFrame is a clipped subset of gdf\n that intersects with poly.\n \"\"\"\n gdf_sub = gdf.iloc[gdf.sindex.query(poly, predicate=\"intersects\")]\n\n # Clip the data with the polygon\n if isinstance(gdf_sub, GeoDataFrame):\n clipped = gdf_sub.copy()\n clipped[\"geometry\"] = gdf_sub.intersection(poly)\n else:\n # GeoSeries\n clipped = gdf_sub.intersection(poly)\n\n return clipped\n\n\ndef clip(gdf, mask, keep_geom_type=False):\n \"\"\"Clip points, lines, or polygon geometries to the mask extent.\n\n Both layers must be in the same Coordinate Reference System (CRS).\n The `gdf` will be clipped to the full extent of the clip object.\n\n If there are multiple polygons in mask, data from `gdf` will be\n clipped to the total boundary of all polygons in mask.\n\n Parameters\n ----------\n gdf : GeoDataFrame or GeoSeries\n Vector layer (point, line, polygon) to be clipped to mask.\n mask : GeoDataFrame, GeoSeries, (Multi)Polygon\n Polygon vector layer used to clip `gdf`.\n The mask's geometry is dissolved into one geometric feature\n and intersected with `gdf`.\n keep_geom_type : boolean, default False\n If True, return only geometries of original type in case of intersection\n resulting in multiple geometry types or GeometryCollections.\n If False, return all resulting geometries (potentially mixed-types).\n\n Returns\n -------\n GeoDataFrame or GeoSeries\n Vector data (points, lines, polygons) from `gdf` clipped to\n polygon boundary from mask.\n\n Examples\n --------\n Clip points (global cities) with a polygon (the South American continent):\n\n >>> import geopandas\n >>> path =\n >>> world = geopandas.read_file(\n ... geopandas.datasets.get_path('naturalearth_lowres'))\n >>> south_america = world[world['continent'] == \"South America\"]\n >>> capitals = geopandas.read_file(\n ... 
geopandas.datasets.get_path('naturalearth_cities'))\n >>> capitals.shape\n (202, 2)\n >>> sa_capitals = geopandas.clip(capitals, south_america)\n >>> sa_capitals.shape\n (12, 2)\n \"\"\"\n if not isinstance(gdf, (GeoDataFrame, GeoSeries)):\n raise TypeError(\n \"'gdf' should be GeoDataFrame or GeoSeries, got {}\".format(type(gdf))\n )\n\n if not isinstance(mask, (GeoDataFrame, GeoSeries, Polygon, MultiPolygon)):\n raise TypeError(\n \"'mask' should be GeoDataFrame, GeoSeries or\"\n \"(Multi)Polygon, got {}\".format(type(gdf))\n )\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n if not _check_crs(gdf, mask):\n _crs_mismatch_warn(gdf, mask, stacklevel=3)\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n box_mask = mask.total_bounds\n else:\n box_mask = mask.bounds\n box_gdf = gdf.total_bounds\n if not (\n ((box_mask[0] <= box_gdf[2]) and (box_gdf[0] <= box_mask[2]))\n and ((box_mask[1] <= box_gdf[3]) and (box_gdf[1] <= box_mask[3]))\n ):\n return gdf.iloc[:0]\n\n if isinstance(mask, (GeoDataFrame, GeoSeries)):\n poly = mask.geometry.unary_union\n else:\n poly = mask\n\n geom_types = gdf.geometry.type\n poly_idx = np.asarray((geom_types == \"Polygon\") | (geom_types == \"MultiPolygon\"))\n line_idx = np.asarray(\n (geom_types == \"LineString\")\n | (geom_types == \"LinearRing\")\n | (geom_types == \"MultiLineString\")\n )\n point_idx = np.asarray((geom_types == \"Point\") | (geom_types == \"MultiPoint\"))\n geomcoll_idx = np.asarray((geom_types == \"GeometryCollection\"))\n\n if point_idx.any():\n point_gdf = _clip_points(gdf[point_idx], poly)\n else:\n point_gdf = None\n\n if poly_idx.any():\n poly_gdf = _clip_line_poly(gdf[poly_idx], poly)\n else:\n poly_gdf = None\n\n if line_idx.any():\n line_gdf = _clip_line_poly(gdf[line_idx], poly)\n else:\n line_gdf = None\n\n if geomcoll_idx.any():\n geomcoll_gdf = _clip_line_poly(gdf[geomcoll_idx], poly)\n else:\n geomcoll_gdf = None\n\n order = pd.Series(range(len(gdf)), index=gdf.index)\n concat = pd.concat([point_gdf, line_gdf, poly_gdf, geomcoll_gdf])\n\n if keep_geom_type:\n geomcoll_concat = (concat.geom_type == \"GeometryCollection\").any()\n geomcoll_orig = geomcoll_idx.any()\n\n new_collection = geomcoll_concat and not geomcoll_orig\n\n if geomcoll_orig:\n warnings.warn(\n \"keep_geom_type can not be called on a \"\n \"GeoDataFrame with GeometryCollection.\"\n )\n else:\n polys = [\"Polygon\", \"MultiPolygon\"]\n lines = [\"LineString\", \"MultiLineString\", \"LinearRing\"]\n points = [\"Point\", \"MultiPoint\"]\n\n # Check that the gdf for multiple geom types (points, lines and/or polys)\n orig_types_total = sum(\n [\n gdf.geom_type.isin(polys).any(),\n gdf.geom_type.isin(lines).any(),\n gdf.geom_type.isin(points).any(),\n ]\n )\n\n # Check how many geometry types are in the clipped GeoDataFrame\n clip_types_total = sum(\n [\n concat.geom_type.isin(polys).any(),\n concat.geom_type.isin(lines).any(),\n concat.geom_type.isin(points).any(),\n ]\n )\n\n # Check there aren't any new geom types in the clipped GeoDataFrame\n more_types = orig_types_total < clip_types_total\n\n if orig_types_total > 1:\n warnings.warn(\n \"keep_geom_type can not be called on a mixed type GeoDataFrame.\"\n )\n elif new_collection or more_types:\n orig_type = gdf.geom_type.iloc[0]\n if new_collection:\n concat = concat.explode()\n if orig_type in polys:\n concat = concat.loc[concat.geom_type.isin(polys)]\n elif orig_type in lines:\n concat = concat.loc[concat.geom_type.isin(lines)]\n\n # Return empty GeoDataFrame or GeoSeries if no shapes remain\n if 
len(concat) == 0:\n return gdf.iloc[:0]\n\n # Preserve the original order of the input\n if isinstance(concat, GeoDataFrame):\n concat[\"_order\"] = order\n return concat.sort_values(by=\"_order\").drop(columns=\"_order\")\n else:\n concat = GeoDataFrame(geometry=concat)\n concat[\"_order\"] = order\n return concat.sort_values(by=\"_order\").geometry\n", "path": "geopandas/tools/clip.py"}]} | 3,685 | 124 |
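Note on the golden diff above: the one-line change matters because a GeoDataFrame's active geometry column is not necessarily called "geometry". A minimal sketch with made-up geometry (the buffer polygon and the single-row frame are illustrative only):

```python
import geopandas as gpd
from shapely.geometry import Point

poly = Point(0, 0).buffer(2.0)                        # stand-in clipping polygon
gdf = gpd.GeoDataFrame({"id": [1], "WKT": [Point(0, 0).buffer(5.0)]}, geometry="WKT")

print(gdf.geometry.name)                              # "WKT", not "geometry"

clipped = gdf.copy()
# pre-patch behaviour: writes a brand new, *inactive* "geometry" column,
# leaving the active "WKT" column unclipped
clipped["geometry"] = gdf.intersection(poly)
# patched behaviour: overwrites whatever the active geometry column is called
clipped[gdf.geometry.name] = gdf.intersection(poly)
```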
gh_patches_debug_2043 | rasdani/github-patches | git_diff | boto__boto-215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RDS call modify_dbinstance with multi_az = True doesn't actually set an instance to MultiAZ
Making a call to a non-multiaz instance with multi_az=True doesn't actually switch the parameter. I assume this is also true for creating one from scratch, but I haven't tested that yet.
</issue>
<code>
[start of boto/emr/connection.py]
1 # Copyright (c) 2010 Spotify AB
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 """
23 Represents a connection to the EMR service
24 """
25 import types
26
27 import boto
28 from boto.ec2.regioninfo import RegionInfo
29 from boto.emr.emrobject import JobFlow, RunJobFlowResponse
30 from boto.emr.step import JarStep
31 from boto.connection import AWSQueryConnection
32 from boto.exception import EmrResponseError
33
34 class EmrConnection(AWSQueryConnection):
35
36 APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
37 DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
38 DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
39 'elasticmapreduce.amazonaws.com')
40 ResponseError = EmrResponseError
41
42 # Constants for AWS Console debugging
43 DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
44 DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'
45
46 def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
47 is_secure=True, port=None, proxy=None, proxy_port=None,
48 proxy_user=None, proxy_pass=None, debug=0,
49 https_connection_factory=None, region=None, path='/'):
50 if not region:
51 region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)
52 self.region = region
53 AWSQueryConnection.__init__(self, aws_access_key_id,
54 aws_secret_access_key,
55 is_secure, port, proxy, proxy_port,
56 proxy_user, proxy_pass,
57 self.region.endpoint, debug,
58 https_connection_factory, path)
59
60 def _required_auth_capability(self):
61 return ['emr']
62
63 def describe_jobflow(self, jobflow_id):
64 """
65 Describes a single Elastic MapReduce job flow
66
67 :type jobflow_id: str
68 :param jobflow_id: The job flow id of interest
69 """
70 jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
71 if jobflows:
72 return jobflows[0]
73
74 def describe_jobflows(self, states=None, jobflow_ids=None,
75 created_after=None, created_before=None):
76 """
77 Retrieve all the Elastic MapReduce job flows on your account
78
79 :type states: list
80 :param states: A list of strings with job flow states wanted
81
82 :type jobflow_ids: list
83 :param jobflow_ids: A list of job flow IDs
84 :type created_after: datetime
85 :param created_after: Bound on job flow creation time
86
87 :type created_before: datetime
88 :param created_before: Bound on job flow creation time
89 """
90 params = {}
91
92 if states:
93 self.build_list_params(params, states, 'JobFlowStates.member')
94 if jobflow_ids:
95 self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
96 if created_after:
97 params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')
98 if created_before:
99 params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')
100
101 return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])
102
103 def terminate_jobflow(self, jobflow_id):
104 """
105 Terminate an Elastic MapReduce job flow
106
107 :type jobflow_id: str
108 :param jobflow_id: A jobflow id
109 """
110 self.terminate_jobflows([jobflow_id])
111
112 def terminate_jobflows(self, jobflow_ids):
113 """
114 Terminate an Elastic MapReduce job flow
115
116 :type jobflow_ids: list
117 :param jobflow_ids: A list of job flow IDs
118 """
119 params = {}
120 self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
121 return self.get_status('TerminateJobFlows', params)
122
123 def add_jobflow_steps(self, jobflow_id, steps):
124 """
125 Adds steps to a jobflow
126
127 :type jobflow_id: str
128 :param jobflow_id: The job flow id
129 :type steps: list(boto.emr.Step)
130 :param steps: A list of steps to add to the job
131 """
132 if type(steps) != types.ListType:
133 steps = [steps]
134 params = {}
135 params['JobFlowId'] = jobflow_id
136
137 # Step args
138 step_args = [self._build_step_args(step) for step in steps]
139 params.update(self._build_step_list(step_args))
140
141 return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)
142
143 def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,
144 master_instance_type='m1.small',
145 slave_instance_type='m1.small', num_instances=1,
146 action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
147 enable_debugging=False,
148 hadoop_version='0.18',
149 steps=[],
150 bootstrap_actions=[]):
151 """
152 Runs a job flow
153
154 :type name: str
155 :param name: Name of the job flow
156 :type log_uri: str
157 :param log_uri: URI of the S3 bucket to place logs
158 :type ec2_keyname: str
159 :param ec2_keyname: EC2 key used for the instances
160 :type availability_zone: str
161 :param availability_zone: EC2 availability zone of the cluster
162 :type master_instance_type: str
163 :param master_instance_type: EC2 instance type of the master
164 :type slave_instance_type: str
165 :param slave_instance_type: EC2 instance type of the slave nodes
166 :type num_instances: int
167 :param num_instances: Number of instances in the Hadoop cluster
168 :type action_on_failure: str
169 :param action_on_failure: Action to take if a step terminates
170 :type keep_alive: bool
171 :param keep_alive: Denotes whether the cluster should stay alive upon completion
172 :type enable_debugging: bool
173 :param enable_debugging: Denotes whether AWS console debugging should be enabled.
174 :type steps: list(boto.emr.Step)
175 :param steps: List of steps to add with the job
176
177 :rtype: str
178 :return: The jobflow id
179 """
180 params = {}
181 if action_on_failure:
182 params['ActionOnFailure'] = action_on_failure
183 params['Name'] = name
184 params['LogUri'] = log_uri
185
186 # Instance args
187 instance_params = self._build_instance_args(ec2_keyname, availability_zone,
188 master_instance_type, slave_instance_type,
189 num_instances, keep_alive, hadoop_version)
190 params.update(instance_params)
191
192 # Debugging step from EMR API docs
193 if enable_debugging:
194 debugging_step = JarStep(name='Setup Hadoop Debugging',
195 action_on_failure='TERMINATE_JOB_FLOW',
196 main_class=None,
197 jar=self.DebuggingJar,
198 step_args=self.DebuggingArgs)
199 steps.insert(0, debugging_step)
200
201 # Step args
202 if steps:
203 step_args = [self._build_step_args(step) for step in steps]
204 params.update(self._build_step_list(step_args))
205
206 if bootstrap_actions:
207 bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
208 params.update(self._build_bootstrap_action_list(bootstrap_action_args))
209
210 response = self.get_object('RunJobFlow', params, RunJobFlowResponse)
211 return response.jobflowid
212
213 def _build_bootstrap_action_args(self, bootstrap_action):
214 bootstrap_action_params = {}
215 bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path
216
217 try:
218 bootstrap_action_params['Name'] = bootstrap_action.name
219 except AttributeError:
220 pass
221
222 args = bootstrap_action.args()
223 if args:
224 self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')
225
226 return bootstrap_action_params
227
228 def _build_step_args(self, step):
229 step_params = {}
230 step_params['ActionOnFailure'] = step.action_on_failure
231 step_params['HadoopJarStep.Jar'] = step.jar()
232
233 main_class = step.main_class()
234 if main_class:
235 step_params['HadoopJarStep.MainClass'] = main_class
236
237 args = step.args()
238 if args:
239 self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')
240
241 step_params['Name'] = step.name
242 return step_params
243
244 def _build_bootstrap_action_list(self, bootstrap_actions):
245 if type(bootstrap_actions) != types.ListType:
246 bootstrap_actions = [bootstrap_actions]
247
248 params = {}
249 for i, bootstrap_action in enumerate(bootstrap_actions):
250 for key, value in bootstrap_action.iteritems():
251 params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
252 return params
253
254 def _build_step_list(self, steps):
255 if type(steps) != types.ListType:
256 steps = [steps]
257
258 params = {}
259 for i, step in enumerate(steps):
260 for key, value in step.iteritems():
261 params['Steps.member.%s.%s' % (i+1, key)] = value
262 return params
263
264 def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type,
265 slave_instance_type, num_instances, keep_alive, hadoop_version):
266 params = {
267 'Instances.MasterInstanceType' : master_instance_type,
268 'Instances.SlaveInstanceType' : slave_instance_type,
269 'Instances.InstanceCount' : num_instances,
270 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),
271 'Instances.HadoopVersion' : hadoop_version
272 }
273
274 if ec2_keyname:
275 params['Instances.Ec2KeyName'] = ec2_keyname
276 if availability_zone:
277 params['Placement'] = availability_zone
278
279 return params
280
281
[end of boto/emr/connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/boto/emr/connection.py b/boto/emr/connection.py
--- a/boto/emr/connection.py
+++ b/boto/emr/connection.py
@@ -274,7 +274,7 @@
if ec2_keyname:
params['Instances.Ec2KeyName'] = ec2_keyname
if availability_zone:
- params['Placement'] = availability_zone
+ params['Placement.AvailabilityZone'] = availability_zone
return params
| {"golden_diff": "diff --git a/boto/emr/connection.py b/boto/emr/connection.py\n--- a/boto/emr/connection.py\n+++ b/boto/emr/connection.py\n@@ -274,7 +274,7 @@\n if ec2_keyname:\n params['Instances.Ec2KeyName'] = ec2_keyname\n if availability_zone:\n- params['Placement'] = availability_zone\n+ params['Placement.AvailabilityZone'] = availability_zone\n \n return params\n", "issue": "RDS call modify_dbinstance with multi_az = True doesn't actually set an instance to MultiAZ\nMaking a call to a non-multiaz instance with multi_az=True doesn't actually switch the parameter. I assume this is also true for creating one from scratch, but I haven't tested that yet.\n\n", "before_files": [{"content": "# Copyright (c) 2010 Spotify AB\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a connection to the EMR service\n\"\"\"\nimport types\n\nimport boto\nfrom boto.ec2.regioninfo import RegionInfo\nfrom boto.emr.emrobject import JobFlow, RunJobFlowResponse\nfrom boto.emr.step import JarStep\nfrom boto.connection import AWSQueryConnection\nfrom boto.exception import EmrResponseError\n\nclass EmrConnection(AWSQueryConnection):\n\n APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')\n DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')\n DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',\n 'elasticmapreduce.amazonaws.com')\n ResponseError = EmrResponseError\n\n # Constants for AWS Console debugging\n DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'\n DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'\n\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n is_secure=True, port=None, proxy=None, proxy_port=None,\n proxy_user=None, proxy_pass=None, debug=0,\n https_connection_factory=None, region=None, path='/'):\n if not region:\n region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)\n self.region = region\n AWSQueryConnection.__init__(self, aws_access_key_id,\n aws_secret_access_key,\n is_secure, port, proxy, proxy_port,\n proxy_user, proxy_pass,\n self.region.endpoint, debug,\n https_connection_factory, path)\n\n def _required_auth_capability(self):\n return ['emr']\n\n def describe_jobflow(self, jobflow_id):\n \"\"\"\n Describes a single Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id of interest\n \"\"\"\n jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])\n if 
jobflows:\n return jobflows[0]\n\n def describe_jobflows(self, states=None, jobflow_ids=None,\n created_after=None, created_before=None):\n \"\"\"\n Retrieve all the Elastic MapReduce job flows on your account\n\n :type states: list\n :param states: A list of strings with job flow states wanted\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n :type created_after: datetime\n :param created_after: Bound on job flow creation time\n\n :type created_before: datetime\n :param created_before: Bound on job flow creation time\n \"\"\"\n params = {}\n\n if states:\n self.build_list_params(params, states, 'JobFlowStates.member')\n if jobflow_ids:\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n if created_after:\n params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')\n if created_before:\n params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')\n\n return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])\n\n def terminate_jobflow(self, jobflow_id):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_id: str\n :param jobflow_id: A jobflow id \n \"\"\"\n self.terminate_jobflows([jobflow_id]) \n\n def terminate_jobflows(self, jobflow_ids):\n \"\"\"\n Terminate an Elastic MapReduce job flow\n\n :type jobflow_ids: list\n :param jobflow_ids: A list of job flow IDs\n \"\"\"\n params = {}\n self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')\n return self.get_status('TerminateJobFlows', params)\n\n def add_jobflow_steps(self, jobflow_id, steps):\n \"\"\"\n Adds steps to a jobflow\n\n :type jobflow_id: str\n :param jobflow_id: The job flow id\n :type steps: list(boto.emr.Step)\n :param steps: A list of steps to add to the job\n \"\"\"\n if type(steps) != types.ListType:\n steps = [steps]\n params = {}\n params['JobFlowId'] = jobflow_id\n\n # Step args\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)\n\n def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,\n master_instance_type='m1.small',\n slave_instance_type='m1.small', num_instances=1,\n action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,\n enable_debugging=False,\n hadoop_version='0.18',\n steps=[],\n bootstrap_actions=[]):\n \"\"\"\n Runs a job flow\n\n :type name: str\n :param name: Name of the job flow\n :type log_uri: str\n :param log_uri: URI of the S3 bucket to place logs\n :type ec2_keyname: str\n :param ec2_keyname: EC2 key used for the instances\n :type availability_zone: str\n :param availability_zone: EC2 availability zone of the cluster\n :type master_instance_type: str\n :param master_instance_type: EC2 instance type of the master\n :type slave_instance_type: str\n :param slave_instance_type: EC2 instance type of the slave nodes\n :type num_instances: int\n :param num_instances: Number of instances in the Hadoop cluster\n :type action_on_failure: str\n :param action_on_failure: Action to take if a step terminates\n :type keep_alive: bool\n :param keep_alive: Denotes whether the cluster should stay alive upon completion\n :type enable_debugging: bool\n :param enable_debugging: Denotes whether AWS console debugging should be enabled.\n :type steps: list(boto.emr.Step)\n :param steps: List of steps to add with the job\n\n :rtype: str\n :return: The jobflow id\n \"\"\"\n params = {}\n if action_on_failure:\n params['ActionOnFailure'] = action_on_failure\n 
params['Name'] = name\n params['LogUri'] = log_uri\n\n # Instance args\n instance_params = self._build_instance_args(ec2_keyname, availability_zone,\n master_instance_type, slave_instance_type,\n num_instances, keep_alive, hadoop_version)\n params.update(instance_params)\n\n # Debugging step from EMR API docs\n if enable_debugging:\n debugging_step = JarStep(name='Setup Hadoop Debugging',\n action_on_failure='TERMINATE_JOB_FLOW',\n main_class=None,\n jar=self.DebuggingJar,\n step_args=self.DebuggingArgs)\n steps.insert(0, debugging_step)\n\n # Step args\n if steps:\n step_args = [self._build_step_args(step) for step in steps]\n params.update(self._build_step_list(step_args))\n\n if bootstrap_actions:\n bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]\n params.update(self._build_bootstrap_action_list(bootstrap_action_args))\n\n response = self.get_object('RunJobFlow', params, RunJobFlowResponse)\n return response.jobflowid\n\n def _build_bootstrap_action_args(self, bootstrap_action):\n bootstrap_action_params = {}\n bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path\n\n try:\n bootstrap_action_params['Name'] = bootstrap_action.name\n except AttributeError:\n pass\n\n args = bootstrap_action.args()\n if args:\n self.build_list_params(bootstrap_action_params, args, 'ScriptBootstrapAction.Args.member')\n\n return bootstrap_action_params\n\n def _build_step_args(self, step):\n step_params = {}\n step_params['ActionOnFailure'] = step.action_on_failure\n step_params['HadoopJarStep.Jar'] = step.jar()\n\n main_class = step.main_class()\n if main_class:\n step_params['HadoopJarStep.MainClass'] = main_class\n\n args = step.args()\n if args:\n self.build_list_params(step_params, args, 'HadoopJarStep.Args.member')\n\n step_params['Name'] = step.name\n return step_params\n\n def _build_bootstrap_action_list(self, bootstrap_actions):\n if type(bootstrap_actions) != types.ListType:\n bootstrap_actions = [bootstrap_actions]\n\n params = {}\n for i, bootstrap_action in enumerate(bootstrap_actions):\n for key, value in bootstrap_action.iteritems():\n params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value\n return params\n\n def _build_step_list(self, steps):\n if type(steps) != types.ListType:\n steps = [steps]\n\n params = {}\n for i, step in enumerate(steps):\n for key, value in step.iteritems():\n params['Steps.member.%s.%s' % (i+1, key)] = value\n return params\n\n def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type,\n slave_instance_type, num_instances, keep_alive, hadoop_version):\n params = {\n 'Instances.MasterInstanceType' : master_instance_type,\n 'Instances.SlaveInstanceType' : slave_instance_type,\n 'Instances.InstanceCount' : num_instances,\n 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),\n 'Instances.HadoopVersion' : hadoop_version\n }\n\n if ec2_keyname:\n params['Instances.Ec2KeyName'] = ec2_keyname\n if availability_zone:\n params['Placement'] = availability_zone\n\n return params\n\n", "path": "boto/emr/connection.py"}]} | 3,767 | 104 |
gh_patches_debug_34858 | rasdani/github-patches | git_diff | ansible-collections__amazon.aws-430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aws_service_ip_ranges support for IPv6
### Summary
We are using the amazon.aws collection and noticed that the aws_service_ip_ranges lookup does not have an option to return IPv6 ranges.
### Issue Type
Feature Idea
### Component Name
`{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }`
Should return a list of IPv6 addresses that correspond to the Route53 health check.
### Pull Request
#430
### Additional Information
```
vars:
rt53_ranges: "{{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }}"
tasks:
- name: "use list return option and iterate as a loop"
debug: msg="{% for x in rt53_ranges %}{{ x }} {% endfor %}"
# ###"2600:1f14:7ff:f800::/56,2600:1f14:fff:f800::/56"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
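For context, the lookup already downloads the published ip-ranges.json document, and the dictionary it gets back from `json.load` keeps IPv4 and IPv6 entries under separate top-level keys: `prefixes` (items carry an `ip_prefix` field) and `ipv6_prefixes` (items carry an `ipv6_prefix` field). A trimmed sketch of that structure, with illustrative values taken from the examples in this record, looks like this:

```python
# Shape of json.load(resp) inside the lookup, trimmed to the relevant keys
# (values are illustrative, taken from the examples in this record):
{
    "prefixes": [
        {"ip_prefix": "52.92.16.0/20", "region": "us-east-1", "service": "S3"},
    ],
    "ipv6_prefixes": [
        {"ipv6_prefix": "2600:1f14:7ff:f800::/56", "region": "us-west-2",
         "service": "ROUTE53_HEALTHCHECKS"},
    ],
}
```

So the feature largely comes down to letting the caller choose which key pair the lookup reads, which is what the accepted diff further down in this record does with its `ipv6_prefixes` option.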
<code>
[start of plugins/lookup/aws_service_ip_ranges.py]
1 # (c) 2016 James Turner <[email protected]>
2 # (c) 2017 Ansible Project
3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
4 from __future__ import (absolute_import, division, print_function)
5 __metaclass__ = type
6
7 DOCUMENTATION = '''
8 lookup: aws_service_ip_ranges
9 author:
10 - James Turner <[email protected]>
11 requirements:
12 - must have public internet connectivity
13 short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
14 description:
15 - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
16 - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
17 options:
18 service:
19 description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
20 region:
21 description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
22 '''
23
24 EXAMPLES = """
25 vars:
26 ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
27 tasks:
28
29 - name: "use list return option and iterate as a loop"
30 debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
31 # "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
32
33 - name: "Pull S3 IP ranges, and print the default return style"
34 debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
35 # "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
36 """
37
38 RETURN = """
39 _raw:
40 description: comma-separated list of CIDR ranges
41 """
42
43
44 import json
45
46 from ansible.errors import AnsibleError
47 from ansible.module_utils.six.moves.urllib.error import HTTPError
48 from ansible.module_utils.six.moves.urllib.error import URLError
49 from ansible.module_utils._text import to_native
50 from ansible.module_utils.urls import ConnectionError
51 from ansible.module_utils.urls import open_url
52 from ansible.module_utils.urls import SSLValidationError
53 from ansible.plugins.lookup import LookupBase
54
55
56 class LookupModule(LookupBase):
57 def run(self, terms, variables, **kwargs):
58 try:
59 resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
60 amazon_response = json.load(resp)['prefixes']
61 except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
62 # on Python 3+, json.decoder.JSONDecodeError is raised for bad
63 # JSON. On 2.x it's a ValueError
64 raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
65 except HTTPError as e:
66 raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
67 except SSLValidationError as e:
68 raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
69 except URLError as e:
70 raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
71 except ConnectionError as e:
72 raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
73
74 if 'region' in kwargs:
75 region = kwargs['region']
76 amazon_response = (item for item in amazon_response if item['region'] == region)
77 if 'service' in kwargs:
78 service = str.upper(kwargs['service'])
79 amazon_response = (item for item in amazon_response if item['service'] == service)
80
81 return [item['ip_prefix'] for item in amazon_response]
82
[end of plugins/lookup/aws_service_ip_ranges.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/lookup/aws_service_ip_ranges.py b/plugins/lookup/aws_service_ip_ranges.py
--- a/plugins/lookup/aws_service_ip_ranges.py
+++ b/plugins/lookup/aws_service_ip_ranges.py
@@ -19,6 +19,9 @@
description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
region:
description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
+ ipv6_prefixes:
+ description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses'
+ version_added: 2.1.0
'''
EXAMPLES = """
@@ -40,7 +43,6 @@
description: comma-separated list of CIDR ranges
"""
-
import json
from ansible.errors import AnsibleError
@@ -55,9 +57,16 @@
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
+ if "ipv6_prefixes" in kwargs and kwargs["ipv6_prefixes"]:
+ prefixes_label = "ipv6_prefixes"
+ ip_prefix_label = "ipv6_prefix"
+ else:
+ prefixes_label = "prefixes"
+ ip_prefix_label = "ip_prefix"
+
try:
resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
- amazon_response = json.load(resp)['prefixes']
+ amazon_response = json.load(resp)[prefixes_label]
except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
@@ -77,5 +86,5 @@
if 'service' in kwargs:
service = str.upper(kwargs['service'])
amazon_response = (item for item in amazon_response if item['service'] == service)
-
- return [item['ip_prefix'] for item in amazon_response]
+ iprange = [item[ip_prefix_label] for item in amazon_response]
+ return iprange
| {"golden_diff": "diff --git a/plugins/lookup/aws_service_ip_ranges.py b/plugins/lookup/aws_service_ip_ranges.py\n--- a/plugins/lookup/aws_service_ip_ranges.py\n+++ b/plugins/lookup/aws_service_ip_ranges.py\n@@ -19,6 +19,9 @@\n description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'\n region:\n description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'\n+ ipv6_prefixes:\n+ description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses'\n+ version_added: 2.1.0\n '''\n \n EXAMPLES = \"\"\"\n@@ -40,7 +43,6 @@\n description: comma-separated list of CIDR ranges\n \"\"\"\n \n-\n import json\n \n from ansible.errors import AnsibleError\n@@ -55,9 +57,16 @@\n \n class LookupModule(LookupBase):\n def run(self, terms, variables, **kwargs):\n+ if \"ipv6_prefixes\" in kwargs and kwargs[\"ipv6_prefixes\"]:\n+ prefixes_label = \"ipv6_prefixes\"\n+ ip_prefix_label = \"ipv6_prefix\"\n+ else:\n+ prefixes_label = \"prefixes\"\n+ ip_prefix_label = \"ip_prefix\"\n+\n try:\n resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')\n- amazon_response = json.load(resp)['prefixes']\n+ amazon_response = json.load(resp)[prefixes_label]\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:\n # on Python 3+, json.decoder.JSONDecodeError is raised for bad\n # JSON. On 2.x it's a ValueError\n@@ -77,5 +86,5 @@\n if 'service' in kwargs:\n service = str.upper(kwargs['service'])\n amazon_response = (item for item in amazon_response if item['service'] == service)\n-\n- return [item['ip_prefix'] for item in amazon_response]\n+ iprange = [item[ip_prefix_label] for item in amazon_response]\n+ return iprange\n", "issue": "aws_service_ip_ranges suppport for ipv6\n### Summary\r\n\r\nWe are using amazon.aws collection and we noticed that the aws_service_ip_ranges does not have an option to return IPv6 ranges.\r\n\r\n### Issue Type\r\n\r\nFeature Idea\r\n\r\n### Component Name\r\n\r\n`{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }`\r\nShould return a list of IPv6 addresses that correspond to the Route53 health check. 
\r\n\r\n\r\n### Pull Request\r\n#430\r\n\r\n### Additional Information\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```\r\nvars:\r\n rt53_ranges: \"{{ lookup('aws_service_ip_ranges', region='us-west-2', service='ROUTE53_HEALTHCHECKS', ipv6_prefix=True, wantlist=True) }}\"\r\ntasks:\r\n\r\n- name: \"use list return option and iterate as a loop\"\r\n debug: msg=\"{% for x in rt53_ranges %}{{ x }} {% endfor %}\"\r\n# ###\"2600:1f14:7ff:f800::/56,2600:1f14:fff:f800::/56\"\r\n```\r\n\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# (c) 2016 James Turner <[email protected]>\n# (c) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\nlookup: aws_service_ip_ranges\nauthor:\n - James Turner <[email protected]>\nrequirements:\n - must have public internet connectivity\nshort_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.\ndescription:\n - AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.\n - This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.\noptions:\n service:\n description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEbUILD, ROUTE53, ROUTE53_HEALTHCHECKS'\n region:\n description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'\n'''\n\nEXAMPLES = \"\"\"\nvars:\n ec2_ranges: \"{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}\"\ntasks:\n\n- name: \"use list return option and iterate as a loop\"\n debug: msg=\"{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}\"\n# \"52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 \"\n\n- name: \"Pull S3 IP ranges, and print the default return style\"\n debug: msg=\"{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}\"\n# \"52.92.16.0/20,52.216.0.0/15,54.231.0.0/17\"\n\"\"\"\n\nRETURN = \"\"\"\n_raw:\n description: comma-separated list of CIDR ranges\n\"\"\"\n\n\nimport json\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six.moves.urllib.error import HTTPError\nfrom ansible.module_utils.six.moves.urllib.error import URLError\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.urls import ConnectionError\nfrom ansible.module_utils.urls import open_url\nfrom ansible.module_utils.urls import SSLValidationError\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass LookupModule(LookupBase):\n def run(self, terms, variables, **kwargs):\n try:\n resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')\n amazon_response = json.load(resp)['prefixes']\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:\n # on Python 3+, json.decoder.JSONDecodeError is raised for bad\n # JSON. 
On 2.x it's a ValueError\n raise AnsibleError(\"Could not decode AWS IP ranges: %s\" % to_native(e))\n except HTTPError as e:\n raise AnsibleError(\"Received HTTP error while pulling IP ranges: %s\" % to_native(e))\n except SSLValidationError as e:\n raise AnsibleError(\"Error validating the server's certificate for: %s\" % to_native(e))\n except URLError as e:\n raise AnsibleError(\"Failed look up IP range service: %s\" % to_native(e))\n except ConnectionError as e:\n raise AnsibleError(\"Error connecting to IP range service: %s\" % to_native(e))\n\n if 'region' in kwargs:\n region = kwargs['region']\n amazon_response = (item for item in amazon_response if item['region'] == region)\n if 'service' in kwargs:\n service = str.upper(kwargs['service'])\n amazon_response = (item for item in amazon_response if item['service'] == service)\n\n return [item['ip_prefix'] for item in amazon_response]\n", "path": "plugins/lookup/aws_service_ip_ranges.py"}]} | 1,958 | 499 |
gh_patches_debug_32086 | rasdani/github-patches | git_diff | pypa__cibuildwheel-408 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CIBW_ENVIRONMENT broken in 1.5.3
Somehow, my line of `export CIBW_ENVIRONMENT='... CCACHE_BASEDIR=`python -c "import tempfile; import os; print(os.path.realpath(tempfile.gettempdir()))"` ...'` (see https://github.com/YannickJadoul/Parselmouth/blob/1f3dd9abc63afdf4a1d26db40d210bb4187118a8/.travis.yml#L143; yes, I know I ought to clean this up) breaks:
```
+ /bin/true
+ mkdir -p /project
+ /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
+ uname -i
x86_64
+ python -c import 'tempfile;' import 'os;' 'print(os.path.realpath(tempfile.gettempdir()))'
File "<string>", line 1
import
```
See https://travis-ci.org/github/YannickJadoul/Parselmouth/jobs/709679114#L392
I'll start investigating, but is this perhaps related to #403, @joerick?
</issue>
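The log line `+ python -c import 'tempfile;' import 'os;' 'print(os.path.realpath(tempfile.gettempdir()))'` is the tell-tale: the words that bashlex had already evaluated were joined back into one string and then re-tokenized, so the quoted `-c` argument fell apart. A minimal sketch of that round trip (not cibuildwheel code, just the failure mode it hits):

```python
import shlex

# bashlex evaluation has already produced three argv words:
words = ['python', '-c',
         'import tempfile; import os; print(os.path.realpath(tempfile.gettempdir()))']

joined = ' '.join(words)   # the original quoting is lost at this point
print(shlex.split(joined))
# ['python', '-c', 'import', 'tempfile;', 'import', 'os;',
#  'print(os.path.realpath(tempfile.gettempdir()))']
```

The fix in this record's diff keeps the command as a list end to end, so the executor receives the intact argv instead of a joined-and-resplit string.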
<code>
[start of cibuildwheel/bashlex_eval.py]
1 import shlex
2 import subprocess
3 from typing import Callable, Dict, List, NamedTuple, Optional, Sequence
4
5 import bashlex # type: ignore
6
7 # a function that takes a shell command and the environment, and returns the result
8 EnvironmentExecutor = Callable[[str, Dict[str, str]], str]
9
10
11 def local_environment_executor(command: str, env: Dict[str, str]) -> str:
12 return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)
13
14
15 class NodeExecutionContext(NamedTuple):
16 environment: Dict[str, str]
17 input: str
18 executor: EnvironmentExecutor
19
20
21 def evaluate(value: str, environment: Dict[str, str], executor: Optional[EnvironmentExecutor] = None) -> str:
22 if not value:
23 # empty string evaluates to empty string
24 # (but trips up bashlex)
25 return ''
26
27 command_node = bashlex.parsesingle(value)
28
29 if len(command_node.parts) != 1:
30 raise ValueError(f'"{value}" has too many parts')
31
32 value_word_node = command_node.parts[0]
33
34 return evaluate_node(
35 value_word_node,
36 context=NodeExecutionContext(environment=environment, input=value, executor=executor or local_environment_executor)
37 )
38
39
40 def evaluate_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
41 if node.kind == 'word':
42 return evaluate_word_node(node, context=context)
43 elif node.kind == 'commandsubstitution':
44 node_result = evaluate_command_node(node.command, context=context)
45 # bash removes training newlines in command substitution
46 return node_result.rstrip()
47 elif node.kind == 'parameter':
48 return evaluate_parameter_node(node, context=context)
49 else:
50 raise ValueError(f'Unsupported bash construct: "{node.kind}"')
51
52
53 def evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
54 value = node.word
55
56 for part in node.parts:
57 part_string = context.input[part.pos[0]:part.pos[1]]
58 part_value = evaluate_node(part, context=context)
59
60 if part_string not in value:
61 raise RuntimeError(
62 'bash parse failed. part "{}" not found in "{}". Word was "{}". Full input was "{}"'.format(
63 part_string, value, node.word, context.input,
64 )
65 )
66
67 value = value.replace(part_string, part_value, 1)
68
69 return value
70
71
72 def evaluate_command_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
73 if any(n.kind == 'operator' for n in node.parts):
74 return evaluate_nodes_as_compound_command(node.parts, context=context)
75 else:
76 return evaluate_nodes_as_simple_command(node.parts, context=context)
77
78
79 def evaluate_nodes_as_compound_command(nodes: Sequence[bashlex.ast.node], context: NodeExecutionContext) -> str:
80 # bashlex doesn't support any operators besides ';' inside command
81 # substitutions, so we only need to handle that case. We do so assuming
82 # that `set -o errexit` is on, because it's easier to code!
83
84 result = ''
85 for node in nodes:
86 if node.kind == 'command':
87 result += evaluate_command_node(node, context=context)
88 elif node.kind == 'operator':
89 if node.op == ';':
90 pass
91 else:
92 raise ValueError(f'Unsupported bash operator: "{node.op}"')
93 else:
94 raise ValueError(f'Unsupported bash node in compound command: "{node.kind}"')
95
96 return result
97
98
99 def evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):
100 words = [evaluate_node(part, context=context) for part in nodes]
101 command = ' '.join(words)
102 return context.executor(command, context.environment)
103
104
105 def evaluate_parameter_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
106 return context.environment.get(node.value, '')
107
[end of cibuildwheel/bashlex_eval.py]
[start of cibuildwheel/docker_container.py]
1 import io
2 import json
3 import os
4 import shlex
5 import subprocess
6 import sys
7 import uuid
8 from os import PathLike
9 from pathlib import Path, PurePath
10 from typing import IO, Dict, List, Optional, Sequence, Union
11
12
13 class DockerContainer:
14 '''
15 An object that represents a running Docker container.
16
17 Intended for use as a context manager e.g.
18 `with DockerContainer('ubuntu') as docker:`
19
20 A bash shell is running in the remote container. When `call()` is invoked,
21 the command is relayed to the remote shell, and the results are streamed
22 back to cibuildwheel.
23 '''
24 UTILITY_PYTHON = '/opt/python/cp38-cp38/bin/python'
25
26 process: subprocess.Popen
27 bash_stdin: IO[bytes]
28 bash_stdout: IO[bytes]
29
30 def __init__(self, docker_image: str, simulate_32_bit=False):
31 self.docker_image = docker_image
32 self.simulate_32_bit = simulate_32_bit
33
34 def __enter__(self) -> 'DockerContainer':
35 self.name = f'cibuildwheel-{uuid.uuid4()}'
36 shell_args = ['linux32', '/bin/bash'] if self.simulate_32_bit else ['/bin/bash']
37 subprocess.run(
38 [
39 'docker', 'create',
40 '--env', 'CIBUILDWHEEL',
41 '--name', self.name,
42 '-i',
43 '-v', '/:/host', # ignored on CircleCI
44 self.docker_image,
45 *shell_args
46 ],
47 check=True,
48 )
49 self.process = subprocess.Popen(
50 [
51 'docker', 'start',
52 '--attach', '--interactive',
53 self.name,
54 ],
55 stdin=subprocess.PIPE,
56 stdout=subprocess.PIPE,
57 )
58
59 assert self.process.stdin and self.process.stdout
60 self.bash_stdin = self.process.stdin
61 self.bash_stdout = self.process.stdout
62
63 # run a noop command to block until the container is responding
64 self.call(['/bin/true'])
65
66 return self
67
68 def __exit__(self, exc_type, exc_val, exc_tb):
69 self.bash_stdin.close()
70 self.process.terminate()
71 self.process.wait()
72
73 subprocess.run(['docker', 'rm', '--force', '-v', self.name])
74 self.name = None
75
76 def copy_into(self, from_path: Path, to_path: PurePath) -> None:
77 # `docker cp` causes 'no space left on device' error when
78 # a container is running and the host filesystem is
79 # mounted. https://github.com/moby/moby/issues/38995
80 # Use `docker exec` instead.
81
82 if from_path.is_dir():
83 self.call(['mkdir', '-p', to_path])
84 subprocess.run(
85 f'tar cf - . | docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -',
86 shell=True,
87 check=True,
88 cwd=from_path)
89 else:
90 subprocess.run(
91 f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c "cat > {shell_quote(to_path)}"',
92 shell=True,
93 check=True)
94
95 def copy_out(self, from_path: PurePath, to_path: Path) -> None:
96 # note: we assume from_path is a dir
97 to_path.mkdir(parents=True, exist_ok=True)
98
99 subprocess.run(
100 f'docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . | tar -xf -',
101 shell=True,
102 check=True,
103 cwd=to_path
104 )
105
106 def glob(self, path: PurePath, pattern: str) -> List[PurePath]:
107 glob_pattern = os.path.join(str(path), pattern)
108
109 path_strs = json.loads(self.call([
110 self.UTILITY_PYTHON,
111 '-c',
112 f'import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)'
113 ], capture_output=True))
114
115 return [PurePath(p) for p in path_strs]
116
117 def call(self, args: Sequence[Union[str, PathLike]], env: Dict[str, str] = {},
118 capture_output=False, cwd: Optional[Union[str, PathLike]] = None) -> str:
119 chdir = f'cd {cwd}' if cwd else ''
120 env_assignments = ' '.join(f'{shlex.quote(k)}={shlex.quote(v)}'
121 for k, v in env.items())
122 command = ' '.join(shlex.quote(str(a)) for a in args)
123 end_of_message = str(uuid.uuid4())
124
125 # log the command we're executing
126 print(f' + {command}')
127
128 # Write a command to the remote shell. First we change the
129 # cwd, if that's required. Then, we use the `env` utility to run
130 # `command` inside the specified environment. We use `env` because it
131 # can cope with spaces and strange characters in the name or value.
132 # Finally, the remote shell is told to write a footer - this will show
133 # up in the output so we know when to stop reading, and will include
134 # the returncode of `command`.
135 self.bash_stdin.write(bytes(f'''(
136 {chdir}
137 env {env_assignments} {command}
138 printf "%04d%s\n" $? {end_of_message}
139 )
140 ''', encoding='utf8', errors='surrogateescape'))
141 self.bash_stdin.flush()
142
143 if capture_output:
144 output_io: IO[bytes] = io.BytesIO()
145 else:
146 output_io = sys.stdout.buffer
147
148 while True:
149 line = self.bash_stdout.readline()
150
151 if line.endswith(b'%s\n' % (bytes(end_of_message, encoding='utf8'))):
152 footer_offset = (
153 len(line)
154 - 1 # newline character
155 - len(end_of_message) # delimiter
156 - 4 # 4 returncode decimals
157 )
158 returncode_str = line[footer_offset:footer_offset+4]
159 returncode = int(returncode_str)
160 # add the last line to output, without the footer
161 output_io.write(line[0:footer_offset])
162 break
163 else:
164 output_io.write(line)
165
166 if isinstance(output_io, io.BytesIO):
167 output = str(output_io.getvalue(), encoding='utf8', errors='surrogateescape')
168 else:
169 output = ''
170
171 if returncode != 0:
172 raise subprocess.CalledProcessError(returncode, args, output)
173
174 return output
175
176 def get_environment(self) -> Dict[str, str]:
177 return json.loads(self.call([
178 self.UTILITY_PYTHON,
179 '-c',
180 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
181 ], capture_output=True))
182
183 def environment_executor(self, command: str, environment: Dict[str, str]) -> str:
184 # used as an EnvironmentExecutor to evaluate commands and capture output
185 return self.call(shlex.split(command), env=environment)
186
187
188 def shell_quote(path: PurePath) -> str:
189 return shlex.quote(str(path))
190
[end of cibuildwheel/docker_container.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cibuildwheel/bashlex_eval.py b/cibuildwheel/bashlex_eval.py
--- a/cibuildwheel/bashlex_eval.py
+++ b/cibuildwheel/bashlex_eval.py
@@ -1,15 +1,14 @@
-import shlex
import subprocess
from typing import Callable, Dict, List, NamedTuple, Optional, Sequence
import bashlex # type: ignore
-# a function that takes a shell command and the environment, and returns the result
-EnvironmentExecutor = Callable[[str, Dict[str, str]], str]
+# a function that takes a command and the environment, and returns the result
+EnvironmentExecutor = Callable[[List[str], Dict[str, str]], str]
-def local_environment_executor(command: str, env: Dict[str, str]) -> str:
- return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)
+def local_environment_executor(command: List[str], env: Dict[str, str]) -> str:
+ return subprocess.check_output(command, env=env, universal_newlines=True)
class NodeExecutionContext(NamedTuple):
@@ -97,8 +96,7 @@
def evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):
- words = [evaluate_node(part, context=context) for part in nodes]
- command = ' '.join(words)
+ command = [evaluate_node(part, context=context) for part in nodes]
return context.executor(command, context.environment)
diff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py
--- a/cibuildwheel/docker_container.py
+++ b/cibuildwheel/docker_container.py
@@ -180,9 +180,9 @@
'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
], capture_output=True))
- def environment_executor(self, command: str, environment: Dict[str, str]) -> str:
+ def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:
# used as an EnvironmentExecutor to evaluate commands and capture output
- return self.call(shlex.split(command), env=environment)
+ return self.call(command, env=environment)
def shell_quote(path: PurePath) -> str:
| {"golden_diff": "diff --git a/cibuildwheel/bashlex_eval.py b/cibuildwheel/bashlex_eval.py\n--- a/cibuildwheel/bashlex_eval.py\n+++ b/cibuildwheel/bashlex_eval.py\n@@ -1,15 +1,14 @@\n-import shlex\n import subprocess\n from typing import Callable, Dict, List, NamedTuple, Optional, Sequence\n \n import bashlex # type: ignore\n \n-# a function that takes a shell command and the environment, and returns the result\n-EnvironmentExecutor = Callable[[str, Dict[str, str]], str]\n+# a function that takes a command and the environment, and returns the result\n+EnvironmentExecutor = Callable[[List[str], Dict[str, str]], str]\n \n \n-def local_environment_executor(command: str, env: Dict[str, str]) -> str:\n- return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)\n+def local_environment_executor(command: List[str], env: Dict[str, str]) -> str:\n+ return subprocess.check_output(command, env=env, universal_newlines=True)\n \n \n class NodeExecutionContext(NamedTuple):\n@@ -97,8 +96,7 @@\n \n \n def evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):\n- words = [evaluate_node(part, context=context) for part in nodes]\n- command = ' '.join(words)\n+ command = [evaluate_node(part, context=context) for part in nodes]\n return context.executor(command, context.environment)\n \n \ndiff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py\n--- a/cibuildwheel/docker_container.py\n+++ b/cibuildwheel/docker_container.py\n@@ -180,9 +180,9 @@\n 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'\n ], capture_output=True))\n \n- def environment_executor(self, command: str, environment: Dict[str, str]) -> str:\n+ def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:\n # used as an EnvironmentExecutor to evaluate commands and capture output\n- return self.call(shlex.split(command), env=environment)\n+ return self.call(command, env=environment)\n \n \n def shell_quote(path: PurePath) -> str:\n", "issue": "CIBW_ENVIRONMENT broken in 1.5.3\nSomehow, my line of `export CIBW_ENVIRONMENT='... 
CCACHE_BASEDIR=`python -c \"import tempfile; import os; print(os.path.realpath(tempfile.gettempdir()))\"` ...'` (see https://github.com/YannickJadoul/Parselmouth/blob/1f3dd9abc63afdf4a1d26db40d210bb4187118a8/.travis.yml#L143; yes, I know I ought to clean this up) breaks:\r\n\r\n```\r\n + /bin/true\r\n + mkdir -p /project\r\n + /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'\r\n + uname -i\r\nx86_64\r\n + python -c import 'tempfile;' import 'os;' 'print(os.path.realpath(tempfile.gettempdir()))'\r\n File \"<string>\", line 1\r\n import\r\n```\r\n\r\nSee https://travis-ci.org/github/YannickJadoul/Parselmouth/jobs/709679114#L392\r\n\r\nI'll start investigating, but is this perhaps related to #403, @joerick?\n", "before_files": [{"content": "import shlex\nimport subprocess\nfrom typing import Callable, Dict, List, NamedTuple, Optional, Sequence\n\nimport bashlex # type: ignore\n\n# a function that takes a shell command and the environment, and returns the result\nEnvironmentExecutor = Callable[[str, Dict[str, str]], str]\n\n\ndef local_environment_executor(command: str, env: Dict[str, str]) -> str:\n return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)\n\n\nclass NodeExecutionContext(NamedTuple):\n environment: Dict[str, str]\n input: str\n executor: EnvironmentExecutor\n\n\ndef evaluate(value: str, environment: Dict[str, str], executor: Optional[EnvironmentExecutor] = None) -> str:\n if not value:\n # empty string evaluates to empty string\n # (but trips up bashlex)\n return ''\n\n command_node = bashlex.parsesingle(value)\n\n if len(command_node.parts) != 1:\n raise ValueError(f'\"{value}\" has too many parts')\n\n value_word_node = command_node.parts[0]\n\n return evaluate_node(\n value_word_node,\n context=NodeExecutionContext(environment=environment, input=value, executor=executor or local_environment_executor)\n )\n\n\ndef evaluate_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n if node.kind == 'word':\n return evaluate_word_node(node, context=context)\n elif node.kind == 'commandsubstitution':\n node_result = evaluate_command_node(node.command, context=context)\n # bash removes training newlines in command substitution\n return node_result.rstrip()\n elif node.kind == 'parameter':\n return evaluate_parameter_node(node, context=context)\n else:\n raise ValueError(f'Unsupported bash construct: \"{node.kind}\"')\n\n\ndef evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n value = node.word\n\n for part in node.parts:\n part_string = context.input[part.pos[0]:part.pos[1]]\n part_value = evaluate_node(part, context=context)\n\n if part_string not in value:\n raise RuntimeError(\n 'bash parse failed. part \"{}\" not found in \"{}\". Word was \"{}\". Full input was \"{}\"'.format(\n part_string, value, node.word, context.input,\n )\n )\n\n value = value.replace(part_string, part_value, 1)\n\n return value\n\n\ndef evaluate_command_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n if any(n.kind == 'operator' for n in node.parts):\n return evaluate_nodes_as_compound_command(node.parts, context=context)\n else:\n return evaluate_nodes_as_simple_command(node.parts, context=context)\n\n\ndef evaluate_nodes_as_compound_command(nodes: Sequence[bashlex.ast.node], context: NodeExecutionContext) -> str:\n # bashlex doesn't support any operators besides ';' inside command\n # substitutions, so we only need to handle that case. 
We do so assuming\n # that `set -o errexit` is on, because it's easier to code!\n\n result = ''\n for node in nodes:\n if node.kind == 'command':\n result += evaluate_command_node(node, context=context)\n elif node.kind == 'operator':\n if node.op == ';':\n pass\n else:\n raise ValueError(f'Unsupported bash operator: \"{node.op}\"')\n else:\n raise ValueError(f'Unsupported bash node in compound command: \"{node.kind}\"')\n\n return result\n\n\ndef evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):\n words = [evaluate_node(part, context=context) for part in nodes]\n command = ' '.join(words)\n return context.executor(command, context.environment)\n\n\ndef evaluate_parameter_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:\n return context.environment.get(node.value, '')\n", "path": "cibuildwheel/bashlex_eval.py"}, {"content": "import io\nimport json\nimport os\nimport shlex\nimport subprocess\nimport sys\nimport uuid\nfrom os import PathLike\nfrom pathlib import Path, PurePath\nfrom typing import IO, Dict, List, Optional, Sequence, Union\n\n\nclass DockerContainer:\n '''\n An object that represents a running Docker container.\n\n Intended for use as a context manager e.g.\n `with DockerContainer('ubuntu') as docker:`\n\n A bash shell is running in the remote container. When `call()` is invoked,\n the command is relayed to the remote shell, and the results are streamed\n back to cibuildwheel.\n '''\n UTILITY_PYTHON = '/opt/python/cp38-cp38/bin/python'\n\n process: subprocess.Popen\n bash_stdin: IO[bytes]\n bash_stdout: IO[bytes]\n\n def __init__(self, docker_image: str, simulate_32_bit=False):\n self.docker_image = docker_image\n self.simulate_32_bit = simulate_32_bit\n\n def __enter__(self) -> 'DockerContainer':\n self.name = f'cibuildwheel-{uuid.uuid4()}'\n shell_args = ['linux32', '/bin/bash'] if self.simulate_32_bit else ['/bin/bash']\n subprocess.run(\n [\n 'docker', 'create',\n '--env', 'CIBUILDWHEEL',\n '--name', self.name,\n '-i',\n '-v', '/:/host', # ignored on CircleCI\n self.docker_image,\n *shell_args\n ],\n check=True,\n )\n self.process = subprocess.Popen(\n [\n 'docker', 'start',\n '--attach', '--interactive',\n self.name,\n ],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n\n assert self.process.stdin and self.process.stdout\n self.bash_stdin = self.process.stdin\n self.bash_stdout = self.process.stdout\n\n # run a noop command to block until the container is responding\n self.call(['/bin/true'])\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.bash_stdin.close()\n self.process.terminate()\n self.process.wait()\n\n subprocess.run(['docker', 'rm', '--force', '-v', self.name])\n self.name = None\n\n def copy_into(self, from_path: Path, to_path: PurePath) -> None:\n # `docker cp` causes 'no space left on device' error when\n # a container is running and the host filesystem is\n # mounted. https://github.com/moby/moby/issues/38995\n # Use `docker exec` instead.\n\n if from_path.is_dir():\n self.call(['mkdir', '-p', to_path])\n subprocess.run(\n f'tar cf - . 
| docker exec -i {self.name} tar -xC {shell_quote(to_path)} -f -',\n shell=True,\n check=True,\n cwd=from_path)\n else:\n subprocess.run(\n f'cat {shell_quote(from_path)} | docker exec -i {self.name} sh -c \"cat > {shell_quote(to_path)}\"',\n shell=True,\n check=True)\n\n def copy_out(self, from_path: PurePath, to_path: Path) -> None:\n # note: we assume from_path is a dir\n to_path.mkdir(parents=True, exist_ok=True)\n\n subprocess.run(\n f'docker exec -i {self.name} tar -cC {shell_quote(from_path)} -f - . | tar -xf -',\n shell=True,\n check=True,\n cwd=to_path\n )\n\n def glob(self, path: PurePath, pattern: str) -> List[PurePath]:\n glob_pattern = os.path.join(str(path), pattern)\n\n path_strs = json.loads(self.call([\n self.UTILITY_PYTHON,\n '-c',\n f'import sys, json, glob; json.dump(glob.glob({glob_pattern!r}), sys.stdout)'\n ], capture_output=True))\n\n return [PurePath(p) for p in path_strs]\n\n def call(self, args: Sequence[Union[str, PathLike]], env: Dict[str, str] = {},\n capture_output=False, cwd: Optional[Union[str, PathLike]] = None) -> str:\n chdir = f'cd {cwd}' if cwd else ''\n env_assignments = ' '.join(f'{shlex.quote(k)}={shlex.quote(v)}'\n for k, v in env.items())\n command = ' '.join(shlex.quote(str(a)) for a in args)\n end_of_message = str(uuid.uuid4())\n\n # log the command we're executing\n print(f' + {command}')\n\n # Write a command to the remote shell. First we change the\n # cwd, if that's required. Then, we use the `env` utility to run\n # `command` inside the specified environment. We use `env` because it\n # can cope with spaces and strange characters in the name or value.\n # Finally, the remote shell is told to write a footer - this will show\n # up in the output so we know when to stop reading, and will include\n # the returncode of `command`.\n self.bash_stdin.write(bytes(f'''(\n {chdir}\n env {env_assignments} {command}\n printf \"%04d%s\\n\" $? {end_of_message}\n )\n ''', encoding='utf8', errors='surrogateescape'))\n self.bash_stdin.flush()\n\n if capture_output:\n output_io: IO[bytes] = io.BytesIO()\n else:\n output_io = sys.stdout.buffer\n\n while True:\n line = self.bash_stdout.readline()\n\n if line.endswith(b'%s\\n' % (bytes(end_of_message, encoding='utf8'))):\n footer_offset = (\n len(line)\n - 1 # newline character\n - len(end_of_message) # delimiter\n - 4 # 4 returncode decimals\n )\n returncode_str = line[footer_offset:footer_offset+4]\n returncode = int(returncode_str)\n # add the last line to output, without the footer\n output_io.write(line[0:footer_offset])\n break\n else:\n output_io.write(line)\n\n if isinstance(output_io, io.BytesIO):\n output = str(output_io.getvalue(), encoding='utf8', errors='surrogateescape')\n else:\n output = ''\n\n if returncode != 0:\n raise subprocess.CalledProcessError(returncode, args, output)\n\n return output\n\n def get_environment(self) -> Dict[str, str]:\n return json.loads(self.call([\n self.UTILITY_PYTHON,\n '-c',\n 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'\n ], capture_output=True))\n\n def environment_executor(self, command: str, environment: Dict[str, str]) -> str:\n # used as an EnvironmentExecutor to evaluate commands and capture output\n return self.call(shlex.split(command), env=environment)\n\n\ndef shell_quote(path: PurePath) -> str:\n return shlex.quote(str(path))\n", "path": "cibuildwheel/docker_container.py"}]} | 3,948 | 496 |
gh_patches_debug_57019 | rasdani/github-patches | git_diff | fidals__shopelectro-415 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove a menu item
Remove the mp3 speakers from the menu. We will not have them anymore.
I went into the admin panel and turned the category off. I hope that was the right way )
http://prntscr.com/k553lt
</issue>
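The admin switch from the screenshot presumably just flips `is_active` on the category's page, while the menu is built by the `roots()` template tag shown below, which at this point ignores that flag. A hedged sketch of the behaviour the request implies (names follow the existing code in `se_extras.py`):

```python
# only categories whose page is still active should reach the menu
active_roots = [
    category
    for category in Category.objects.select_related('page').get_cached_trees()
    if category.page.is_active
]
```

The accepted diff later in this record implements this filtering inside `roots()` itself.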
<code>
[start of shopelectro/templatetags/se_extras.py]
1 import datetime
2 import math
3
4 from django import template
5 from django.conf import settings
6 from django.contrib.humanize.templatetags.humanize import intcomma
7 from django.template.defaultfilters import floatformat
8 from django.urls import reverse
9
10 from images.models import ImageMixin
11 from pages.models import Page
12
13 from shopelectro import config
14 from shopelectro.models import Category
15
16 register = template.Library()
17
18
19 # TODO - move it in catalog. Inspired by lp_electric
20 @register.simple_tag
21 def roots():
22 return sorted(
23 Category.objects
24 .select_related('page')
25 .get_cached_trees(), # https://goo.gl/rFKiku
26 key=lambda x: x.page.position
27 )
28
29
30 @register.simple_tag
31 def footer_links():
32 return config.FOOTER_LINKS
33
34
35 # TODO - move in pages. Inspired by LP electric
36 @register.filter
37 def class_name(model):
38 """Return Model name."""
39 return type(model).__name__
40
41
42 @register.simple_tag
43 def time_to_call():
44 def is_weekend(t):
45 return t.weekday() > 4
46
47 def is_friday(t):
48 return t.weekday() == 4
49
50 def not_yet_opened(t):
51 current_time = (t.hour, t.minute)
52 open_time = (10, 00)
53 return current_time < open_time and not is_weekend(t)
54
55 def is_closed(t):
56 current_time = (t.hour, t.minute)
57 closing_time = (16, 30) if is_friday(t) else (17, 30)
58 return current_time > closing_time
59
60 when_we_call = {
61 lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): 'В понедельник в 10:30',
62 lambda now: not_yet_opened(now): 'Сегодня в 10:30',
63 lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): 'Завтра в 10:30',
64 lambda _: True: 'В течение 30 минут'
65 }
66
67 time_ = datetime.datetime.now()
68 call = ' позвонит менеджер и обсудит детали доставки.'
69 for condition, time in when_we_call.items():
70 if condition(time_):
71 return time + call
72
73
74 # TODO - move it in pages.
75 @register.simple_tag
76 def full_url(url_name, *args):
77 return settings.BASE_URL + reverse(url_name, args=args)
78
79
80 @register.filter
81 def humanize_price(price):
82 return intcomma(floatformat(price, 0))
83
84
85 # Not good code, but duker at 06/10/2016 don't know how to fix it.
86 # It makes Image model very complex.
87 @register.simple_tag
88 def get_img_alt(entity: ImageMixin):
89 product_alt = 'Фотография {}'
90 logo_alt = 'Логотип компании Shopelectro'
91
92 if not isinstance(entity, Page):
93 return logo_alt
94
95 # try one of this attributes to get pages name
96 name_attrs = ['h1', 'title', 'name']
97 entity_name = next(
98 getattr(entity, attr)
99 for attr in name_attrs
100 if getattr(entity, attr)
101 )
102 return product_alt.format(entity_name)
103
104
105 @register.simple_tag
106 def main_image_or_logo(page: Page):
107 """Used for microdata."""
108 if hasattr(page, 'main_image') and page.main_image:
109 return page.main_image.url
110 else:
111 return settings.STATIC_URL + 'images/logo.png'
112
113
114 @register.inclusion_tag('catalog/product_feedbacks_icons.html')
115 def icon_stars(rating=0):
116 """Render set of rating icons based on 1 through 5 rating values."""
117 full_icons = int(math.floor(rating))
118 half_icons = 0 if rating == int(rating) else 1
119 empty_icons = 5 - full_icons - half_icons
120
121 return {
122 'full_icons': range(full_icons),
123 'half_icons': range(half_icons),
124 'empty_icons': range(empty_icons),
125 }
126
[end of shopelectro/templatetags/se_extras.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py
--- a/shopelectro/templatetags/se_extras.py
+++ b/shopelectro/templatetags/se_extras.py
@@ -20,10 +20,13 @@
@register.simple_tag
def roots():
return sorted(
- Category.objects
- .select_related('page')
- .get_cached_trees(), # https://goo.gl/rFKiku
- key=lambda x: x.page.position
+ filter(
+ lambda x: x.page.is_active,
+ Category.objects # https://goo.gl/rFKiku
+ .select_related('page')
+ .get_cached_trees()
+ ),
+ key=lambda x: x.page.position,
)
| {"golden_diff": "diff --git a/shopelectro/templatetags/se_extras.py b/shopelectro/templatetags/se_extras.py\n--- a/shopelectro/templatetags/se_extras.py\n+++ b/shopelectro/templatetags/se_extras.py\n@@ -20,10 +20,13 @@\n @register.simple_tag\n def roots():\n return sorted(\n- Category.objects\n- .select_related('page')\n- .get_cached_trees(), # https://goo.gl/rFKiku\n- key=lambda x: x.page.position\n+ filter(\n+ lambda x: x.page.is_active,\n+ Category.objects # https://goo.gl/rFKiku\n+ .select_related('page')\n+ .get_cached_trees()\n+ ),\n+ key=lambda x: x.page.position,\n )\n", "issue": "\u0423\u0431\u0435\u0440\u0438 \u043f\u0443\u043d\u043a\u0442 \u043c\u0435\u043d\u044e\n\u0423\u0431\u0435\u0440\u0438 mp3 \u043a\u043e\u043b\u043e\u043d\u043a\u0438 \u0438\u0437 \u043c\u0435\u043d\u044e. \u0418\u0445 \u0431\u043e\u043b\u044c\u0448\u0435 \u043d\u0435 \u0431\u0443\u0434\u0435\u0442.\r\n\u042f \u0437\u0430\u0448\u0435\u043b \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443 \u0438 \u0432\u044b\u043a\u043b\u044e\u0447\u0438\u043b \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044e. \u041d\u0430\u0434\u0435\u044e\u0441\u044c \u043f\u0440\u0430\u0432\u0438\u043b\u044c\u043d\u043e ) \r\nhttp://prntscr.com/k553lt\n", "before_files": [{"content": "import datetime\nimport math\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\n\nfrom images.models import ImageMixin\nfrom pages.models import Page\n\nfrom shopelectro import config\nfrom shopelectro.models import Category\n\nregister = template.Library()\n\n\n# TODO - move it in catalog. Inspired by lp_electric\[email protected]_tag\ndef roots():\n return sorted(\n Category.objects\n .select_related('page')\n .get_cached_trees(), # https://goo.gl/rFKiku\n key=lambda x: x.page.position\n )\n\n\[email protected]_tag\ndef footer_links():\n return config.FOOTER_LINKS\n\n\n# TODO - move in pages. 
Inspired by LP electric\[email protected]\ndef class_name(model):\n \"\"\"Return Model name.\"\"\"\n return type(model).__name__\n\n\[email protected]_tag\ndef time_to_call():\n def is_weekend(t):\n return t.weekday() > 4\n\n def is_friday(t):\n return t.weekday() == 4\n\n def not_yet_opened(t):\n current_time = (t.hour, t.minute)\n open_time = (10, 00)\n return current_time < open_time and not is_weekend(t)\n\n def is_closed(t):\n current_time = (t.hour, t.minute)\n closing_time = (16, 30) if is_friday(t) else (17, 30)\n return current_time > closing_time\n\n when_we_call = {\n lambda now: is_weekend(now) or (is_friday(now) and is_closed(now)): '\u0412 \u043f\u043e\u043d\u0435\u0434\u0435\u043b\u044c\u043d\u0438\u043a \u0432 10:30',\n lambda now: not_yet_opened(now): '\u0421\u0435\u0433\u043e\u0434\u043d\u044f \u0432 10:30',\n lambda now: is_closed(now) and not (is_friday(now) or is_weekend(now)): '\u0417\u0430\u0432\u0442\u0440\u0430 \u0432 10:30',\n lambda _: True: '\u0412 \u0442\u0435\u0447\u0435\u043d\u0438\u0435 30 \u043c\u0438\u043d\u0443\u0442'\n }\n\n time_ = datetime.datetime.now()\n call = ' \u043f\u043e\u0437\u0432\u043e\u043d\u0438\u0442 \u043c\u0435\u043d\u0435\u0434\u0436\u0435\u0440 \u0438 \u043e\u0431\u0441\u0443\u0434\u0438\u0442 \u0434\u0435\u0442\u0430\u043b\u0438 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0438.'\n for condition, time in when_we_call.items():\n if condition(time_):\n return time + call\n\n\n# TODO - move it in pages.\[email protected]_tag\ndef full_url(url_name, *args):\n return settings.BASE_URL + reverse(url_name, args=args)\n\n\[email protected]\ndef humanize_price(price):\n return intcomma(floatformat(price, 0))\n\n\n# Not good code, but duker at 06/10/2016 don't know how to fix it.\n# It makes Image model very complex.\[email protected]_tag\ndef get_img_alt(entity: ImageMixin):\n product_alt = '\u0424\u043e\u0442\u043e\u0433\u0440\u0430\u0444\u0438\u044f {}'\n logo_alt = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n if not isinstance(entity, Page):\n return logo_alt\n\n # try one of this attributes to get pages name\n name_attrs = ['h1', 'title', 'name']\n entity_name = next(\n getattr(entity, attr)\n for attr in name_attrs\n if getattr(entity, attr)\n )\n return product_alt.format(entity_name)\n\n\[email protected]_tag\ndef main_image_or_logo(page: Page):\n \"\"\"Used for microdata.\"\"\"\n if hasattr(page, 'main_image') and page.main_image:\n return page.main_image.url\n else:\n return settings.STATIC_URL + 'images/logo.png'\n\n\[email protected]_tag('catalog/product_feedbacks_icons.html')\ndef icon_stars(rating=0):\n \"\"\"Render set of rating icons based on 1 through 5 rating values.\"\"\"\n full_icons = int(math.floor(rating))\n half_icons = 0 if rating == int(rating) else 1\n empty_icons = 5 - full_icons - half_icons\n\n return {\n 'full_icons': range(full_icons),\n 'half_icons': range(half_icons),\n 'empty_icons': range(empty_icons),\n }\n", "path": "shopelectro/templatetags/se_extras.py"}]} | 1,792 | 188 |
gh_patches_debug_9311 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-3578 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pyside2 looking different from when running as script.
Hi. I'm using the latest development version.
As you can see when I run my test script:
```
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.pushButton = QtWidgets.QPushButton(self.tab)
self.pushButton.setGeometry(QtCore.QRect(150, 90, 97, 34))
self.pushButton.setObjectName("pushButton")
self.tableWidget = QtWidgets.QTableWidget(self.tab)
self.tableWidget.setGeometry(QtCore.QRect(140, 150, 256, 192))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.tabWidget.addTab(self.tab_2, "")
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 30))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "PushButton"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "New Column"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "New Column"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "New Column"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
```
from console:

and as the binary pyinstaller creates:

It looks very different (ignore the blue line; that's something that happened recently on this computer for some reason, but the problem existed before it). Perhaps this has something to do with the themes in qt5_plugins? Anyway, how can I fix this? Ideally it would be possible to bundle my theme "breeze dark" with the application so the user has the ability to use that.
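A plausible explanation is that the frozen build falls back to a plain Qt style because the style/theme plugins are not collected by the hook shown below. As a quick diagnostic, a small sketch like the following (not part of the original report) can be run both as a script and inside the frozen binary to compare which styles Qt can actually see:
```python
# Hypothetical diagnostic snippet: a missing "Breeze" entry or an empty plugin
# search path in the frozen run points at style plugins that were not bundled.
from PySide2 import QtWidgets

app = QtWidgets.QApplication([])
print("Styles Qt can see:", QtWidgets.QStyleFactory.keys())
print("Plugin search paths:", list(app.libraryPaths()))
```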
</issue>
<code>
[start of PyInstaller/hooks/hook-PySide2.QtGui.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2018, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9
10 from PyInstaller.utils.hooks import qt_plugins_binaries
11 from PyInstaller.compat import is_linux
12
13 hiddenimports = ['PySide2.QtCore']
14
15 binaries = []
16 binaries.extend(qt_plugins_binaries('accessible', namespace='PySide2'))
17 binaries.extend(qt_plugins_binaries('iconengines', namespace='PySide2'))
18 binaries.extend(qt_plugins_binaries('imageformats', namespace='PySide2'))
19 binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))
20 binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))
21 binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))
22
23 if is_linux:
24 binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))
25
[end of PyInstaller/hooks/hook-PySide2.QtGui.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-PySide2.QtGui.py b/PyInstaller/hooks/hook-PySide2.QtGui.py
--- a/PyInstaller/hooks/hook-PySide2.QtGui.py
+++ b/PyInstaller/hooks/hook-PySide2.QtGui.py
@@ -19,6 +19,7 @@
binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))
binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))
binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))
+binaries.extend(qt_plugins_binaries('styles', namespace='PySide2'))
if is_linux:
binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PySide2.QtGui.py b/PyInstaller/hooks/hook-PySide2.QtGui.py\n--- a/PyInstaller/hooks/hook-PySide2.QtGui.py\n+++ b/PyInstaller/hooks/hook-PySide2.QtGui.py\n@@ -19,6 +19,7 @@\n binaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))\n binaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))\n binaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))\n+binaries.extend(qt_plugins_binaries('styles', namespace='PySide2'))\n \n if is_linux:\n binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))\n", "issue": "Pyside2 looking different from when running as script.\nHi. Im using latest development version.\r\n\r\nAs you can see when i run my test script:\r\n```\r\nfrom PySide2 import QtCore, QtGui, QtWidgets\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(800, 600)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)\r\n self.gridLayout.setObjectName(\"gridLayout\")\r\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\r\n self.tabWidget.setObjectName(\"tabWidget\")\r\n self.tab = QtWidgets.QWidget()\r\n self.tab.setObjectName(\"tab\")\r\n self.pushButton = QtWidgets.QPushButton(self.tab)\r\n self.pushButton.setGeometry(QtCore.QRect(150, 90, 97, 34))\r\n self.pushButton.setObjectName(\"pushButton\")\r\n self.tableWidget = QtWidgets.QTableWidget(self.tab)\r\n self.tableWidget.setGeometry(QtCore.QRect(140, 150, 256, 192))\r\n self.tableWidget.setObjectName(\"tableWidget\")\r\n self.tableWidget.setColumnCount(3)\r\n self.tableWidget.setRowCount(0)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(0, item)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(1, item)\r\n item = QtWidgets.QTableWidgetItem()\r\n self.tableWidget.setHorizontalHeaderItem(2, item)\r\n self.tabWidget.addTab(self.tab, \"\")\r\n self.tab_2 = QtWidgets.QWidget()\r\n self.tab_2.setObjectName(\"tab_2\")\r\n self.tabWidget.addTab(self.tab_2, \"\")\r\n self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 30))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n self.pushButton.setText(_translate(\"MainWindow\", \"PushButton\"))\r\n item = self.tableWidget.horizontalHeaderItem(0)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n item = self.tableWidget.horizontalHeaderItem(1)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n item = self.tableWidget.horizontalHeaderItem(2)\r\n item.setText(_translate(\"MainWindow\", \"New Column\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"Tab 1\"))\r\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), 
_translate(\"MainWindow\", \"Tab 2\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n```\r\n\r\nfrom console:\r\n\r\nand as the binary pyinstaller creates:\r\n\r\n\r\nIt looks very different(ignore the blue line, that's something that happened recently on this comp for some reason but the problem existed previous to this). Perhaps this has something to do with the themes in qt5_plugins? Anyway, how can i fix this? Ideally it would be possible to bundle my theme \"breeze dark\" with the application so the user has the ability to use that.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import qt_plugins_binaries\nfrom PyInstaller.compat import is_linux\n\nhiddenimports = ['PySide2.QtCore']\n\nbinaries = []\nbinaries.extend(qt_plugins_binaries('accessible', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('iconengines', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('imageformats', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('inputmethods', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('graphicssystems', namespace='PySide2'))\nbinaries.extend(qt_plugins_binaries('platforms', namespace='PySide2'))\n\nif is_linux:\n binaries.extend(qt_plugins_binaries('platformthemes', namespace='PySide2'))\n", "path": "PyInstaller/hooks/hook-PySide2.QtGui.py"}]} | 1,753 | 169 |
gh_patches_debug_5669 | rasdani/github-patches | git_diff | lutris__lutris-3705 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Store banners in .cache
They have no place to be in .local/share
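For reference, a minimal sketch of what the request amounts to, mirroring how `CACHE_DIR` is already derived in `lutris/settings.py` below (the constant names here are only illustrative):
```python
# Sketch only: banner files belong under the XDG cache directory
# (~/.cache/lutris/banners) rather than under ~/.local/share/lutris.
import os
from gi.repository import GLib

CACHE_DIR = os.path.join(GLib.get_user_cache_dir(), "lutris")
BANNER_PATH = os.path.join(CACHE_DIR, "banners")
os.makedirs(BANNER_PATH, exist_ok=True)
```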
</issue>
<code>
[start of lutris/settings.py]
1 """Internal settings."""
2 import os
3 import sys
4 from gettext import gettext as _
5
6 from gi.repository import GLib
7
8 from lutris import __version__
9 from lutris.util.settings import SettingsIO
10
11 PROJECT = "Lutris"
12 VERSION = __version__
13 COPYRIGHT = _("(c) 2010-2021 Lutris Team")
14 AUTHORS = [_("The Lutris team")]
15
16 # Paths
17 CONFIG_DIR = os.path.join(GLib.get_user_config_dir(), "lutris")
18 CONFIG_FILE = os.path.join(CONFIG_DIR, "lutris.conf")
19 DATA_DIR = os.path.join(GLib.get_user_data_dir(), "lutris")
20 RUNNER_DIR = os.path.join(DATA_DIR, "runners")
21 RUNTIME_DIR = os.path.join(DATA_DIR, "runtime")
22 CACHE_DIR = os.path.join(GLib.get_user_cache_dir(), "lutris")
23 GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, "games")
24
25 TMP_PATH = os.path.join(CACHE_DIR, "tmp")
26 BANNER_PATH = os.path.join(DATA_DIR, "banners")
27 COVERART_PATH = os.path.join(DATA_DIR, "coverart")
28 ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "128x128", "apps")
29
30 sio = SettingsIO(CONFIG_FILE)
31 if "nosetests" in sys.argv[0] or "pytest" in sys.argv[0]:
32 PGA_DB = "/tmp/pga.db"
33 else:
34 PGA_DB = sio.read_setting("pga_path") or os.path.join(DATA_DIR, "pga.db")
35
36 SITE_URL = sio.read_setting("website") or "https://lutris.net"
37
38 DRIVER_HOWTO_URL = "https://github.com/lutris/docs/blob/master/InstallingDrivers.md"
39 INSTALLER_URL = SITE_URL + "/api/installers/%s"
40 # XXX change this, should query on the installer, not the game.
41 INSTALLER_REVISION_URL = SITE_URL + "/api/installers/games/%s/revisions/%s"
42 GAME_URL = SITE_URL + "/games/%s/"
43 RUNTIME_URL = SITE_URL + "/api/runtimes"
44
45 STEAM_API_KEY = sio.read_setting("steam_api_key") or "34C9698CEB394AB4401D65927C6B3752"
46 DISCORD_CLIENT_ID = sio.read_setting("discord_client_id") or "618290412402114570"
47
48
49 read_setting = sio.read_setting
50 write_setting = sio.write_setting
51
[end of lutris/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/settings.py b/lutris/settings.py
--- a/lutris/settings.py
+++ b/lutris/settings.py
@@ -23,7 +23,7 @@
GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, "games")
TMP_PATH = os.path.join(CACHE_DIR, "tmp")
-BANNER_PATH = os.path.join(DATA_DIR, "banners")
+BANNER_PATH = os.path.join(CACHE_DIR, "banners")
COVERART_PATH = os.path.join(DATA_DIR, "coverart")
ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "128x128", "apps")
| {"golden_diff": "diff --git a/lutris/settings.py b/lutris/settings.py\n--- a/lutris/settings.py\n+++ b/lutris/settings.py\n@@ -23,7 +23,7 @@\n GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n \n TMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\n-BANNER_PATH = os.path.join(DATA_DIR, \"banners\")\n+BANNER_PATH = os.path.join(CACHE_DIR, \"banners\")\n COVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\n ICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n", "issue": "Store banners in .cache\nThey have no place to be in .local/share\nStore banners in .cache\nThey have no place to be in .local/share\n", "before_files": [{"content": "\"\"\"Internal settings.\"\"\"\nimport os\nimport sys\nfrom gettext import gettext as _\n\nfrom gi.repository import GLib\n\nfrom lutris import __version__\nfrom lutris.util.settings import SettingsIO\n\nPROJECT = \"Lutris\"\nVERSION = __version__\nCOPYRIGHT = _(\"(c) 2010-2021 Lutris Team\")\nAUTHORS = [_(\"The Lutris team\")]\n\n# Paths\nCONFIG_DIR = os.path.join(GLib.get_user_config_dir(), \"lutris\")\nCONFIG_FILE = os.path.join(CONFIG_DIR, \"lutris.conf\")\nDATA_DIR = os.path.join(GLib.get_user_data_dir(), \"lutris\")\nRUNNER_DIR = os.path.join(DATA_DIR, \"runners\")\nRUNTIME_DIR = os.path.join(DATA_DIR, \"runtime\")\nCACHE_DIR = os.path.join(GLib.get_user_cache_dir(), \"lutris\")\nGAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n\nTMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\nBANNER_PATH = os.path.join(DATA_DIR, \"banners\")\nCOVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\nICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n\nsio = SettingsIO(CONFIG_FILE)\nif \"nosetests\" in sys.argv[0] or \"pytest\" in sys.argv[0]:\n PGA_DB = \"/tmp/pga.db\"\nelse:\n PGA_DB = sio.read_setting(\"pga_path\") or os.path.join(DATA_DIR, \"pga.db\")\n\nSITE_URL = sio.read_setting(\"website\") or \"https://lutris.net\"\n\nDRIVER_HOWTO_URL = \"https://github.com/lutris/docs/blob/master/InstallingDrivers.md\"\nINSTALLER_URL = SITE_URL + \"/api/installers/%s\"\n# XXX change this, should query on the installer, not the game.\nINSTALLER_REVISION_URL = SITE_URL + \"/api/installers/games/%s/revisions/%s\"\nGAME_URL = SITE_URL + \"/games/%s/\"\nRUNTIME_URL = SITE_URL + \"/api/runtimes\"\n\nSTEAM_API_KEY = sio.read_setting(\"steam_api_key\") or \"34C9698CEB394AB4401D65927C6B3752\"\nDISCORD_CLIENT_ID = sio.read_setting(\"discord_client_id\") or \"618290412402114570\"\n\n\nread_setting = sio.read_setting\nwrite_setting = sio.write_setting\n", "path": "lutris/settings.py"}]} | 1,211 | 146 |
gh_patches_debug_804 | rasdani/github-patches | git_diff | scikit-hep__awkward-1830 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ak.fill_none(axis=None)` does nothing
### Version of Awkward Array
main
### Description and code to reproduce
The `apply` function for this case does not return (or recurse)
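A short reproduction sketch, assumed from the description and the docstring of `ak.fill_none` below; the comments describe the behaviour being reported:
```python
import awkward as ak

arr = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
print(ak.fill_none(arr, 0, axis=-1))    # [[1.1, 0, 2.2], [], [0, 3.3, 4.4]]
print(ak.fill_none(arr, 0, axis=None))  # on the affected build the Nones survive unchanged
```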
</issue>
<code>
[start of src/awkward/operations/ak_fill_none.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import numbers
4
5 import awkward as ak
6
7 np = ak.nplikes.NumpyMetadata.instance()
8
9
10 def fill_none(array, value, axis=-1, highlevel=True, behavior=None):
11 """
12 Args:
13 array: Data in which to replace None with a given value.
14 value: Data with which to replace None.
15 axis (None or int): If None, replace all None values in the array
16 with the given value; if an int, The dimension at which this
17 operation is applied. The outermost dimension is `0`, followed
18 by `1`, etc., and negative values count backward from the
19 innermost: `-1` is the innermost dimension, `-2` is the next
20 level up, etc.
21 highlevel (bool): If True, return an #ak.Array; otherwise, return
22 a low-level #ak.contents.Content subclass.
23 behavior (None or dict): Custom #ak.behavior for the output array, if
24 high-level.
25
26 Replaces missing values (None) with a given `value`.
27
28 For example, in the following `array`,
29
30 ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
31
32 The None values could be replaced with `0` by
33
34 >>> ak.fill_none(array, 0)
35 <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>
36
37 The replacement value doesn't strictly need the same type as the
38 surrounding data. For example, the None values could also be replaced
39 by a string.
40
41 >>> ak.fill_none(array, "hi")
42 <Array [[1.1, 'hi', 2.2], ... ['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>
43
44 The list content now has a union type:
45
46 >>> ak.type(ak.fill_none(array, "hi"))
47 3 * var * union[float64, string]
48
49 The values could be floating-point numbers or strings.
50 """
51 with ak._errors.OperationErrorContext(
52 "ak.fill_none",
53 dict(
54 array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior
55 ),
56 ):
57 return _impl(array, value, axis, highlevel, behavior)
58
59
60 def _impl(array, value, axis, highlevel, behavior):
61 arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
62 nplike = ak.nplikes.nplike_of(arraylayout)
63
64 # Convert value type to appropriate layout
65 if (
66 isinstance(value, np.ndarray)
67 and issubclass(value.dtype.type, (np.bool_, np.number))
68 and len(value.shape) != 0
69 ):
70 valuelayout = ak.operations.to_layout(
71 nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False
72 )
73 elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (
74 isinstance(value, np.ndarray)
75 and issubclass(value.dtype.type, (np.bool_, np.number))
76 ):
77 valuelayout = ak.operations.to_layout(
78 nplike.asarray(value), allow_record=False, allow_other=False
79 )
80 elif (
81 ak._util.is_sized_iterable(value)
82 and not (isinstance(value, (str, bytes)))
83 or isinstance(value, (ak.highlevel.Record, ak.record.Record))
84 ):
85 valuelayout = ak.operations.to_layout(
86 value, allow_record=True, allow_other=False
87 )
88 if isinstance(valuelayout, ak.record.Record):
89 valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
90 elif len(valuelayout) == 0:
91 offsets = ak.index.Index64(
92 nplike.array([0, 0], dtype=np.int64), nplike=nplike
93 )
94 valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)
95 else:
96 valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)
97 else:
98 valuelayout = ak.operations.to_layout(
99 [value], allow_record=False, allow_other=False
100 )
101
102 def maybe_fillna(layout):
103 if layout.is_OptionType:
104 return layout.fill_none(valuelayout)
105 else:
106 return layout
107
108 if axis is None:
109
110 def action(layout, depth, depth_context, **kwargs):
111 layout = maybe_fillna(layout)
112
113 else:
114
115 def action(layout, depth, depth_context, **kwargs):
116 posaxis = layout.axis_wrap_if_negative(depth_context["posaxis"])
117 depth_context["posaxis"] = posaxis
118 if posaxis + 1 < depth:
119 return layout
120 elif posaxis + 1 == depth:
121 return maybe_fillna(layout)
122
123 depth_context = {"posaxis": axis}
124 out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)
125
126 return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel)
127
[end of src/awkward/operations/ak_fill_none.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py
--- a/src/awkward/operations/ak_fill_none.py
+++ b/src/awkward/operations/ak_fill_none.py
@@ -107,8 +107,8 @@
if axis is None:
- def action(layout, depth, depth_context, **kwargs):
- layout = maybe_fillna(layout)
+ def action(layout, continuation, **kwargs):
+ return maybe_fillna(continuation())
else:
| {"golden_diff": "diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py\n--- a/src/awkward/operations/ak_fill_none.py\n+++ b/src/awkward/operations/ak_fill_none.py\n@@ -107,8 +107,8 @@\n \n if axis is None:\n \n- def action(layout, depth, depth_context, **kwargs):\n- layout = maybe_fillna(layout)\n+ def action(layout, continuation, **kwargs):\n+ return maybe_fillna(continuation())\n \n else:\n", "issue": "`ak.fill_none(axis=None)` does nothing\n### Version of Awkward Array\r\n\r\nmain\r\n\r\n### Description and code to reproduce\r\n\r\nThe `apply` function for this case does not return (or recurse)\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef fill_none(array, value, axis=-1, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Data in which to replace None with a given value.\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following `array`,\n\n ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], ... 
['hi', 3.3, 4.4]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.type(ak.fill_none(array, \"hi\"))\n 3 * var * union[float64, string]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n nplike = ak.nplikes.nplike_of(arraylayout)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value)[np.newaxis], allow_record=False, allow_other=False\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n nplike.array([0, 0], dtype=np.int64), nplike=nplike\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n def maybe_fillna(layout):\n if layout.is_OptionType:\n return layout.fill_none(valuelayout)\n else:\n return layout\n\n if axis is None:\n\n def action(layout, depth, depth_context, **kwargs):\n layout = maybe_fillna(layout)\n\n else:\n\n def action(layout, depth, depth_context, **kwargs):\n posaxis = layout.axis_wrap_if_negative(depth_context[\"posaxis\"])\n depth_context[\"posaxis\"] = posaxis\n if posaxis + 1 < depth:\n return layout\n elif posaxis + 1 == depth:\n return maybe_fillna(layout)\n\n depth_context = {\"posaxis\": axis}\n out = arraylayout.recursively_apply(action, behavior, depth_context=depth_context)\n\n return ak._util.wrap(out, ak._util.behavior_of(array, behavior=behavior), highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}]} | 2,024 | 126 |
gh_patches_debug_36171 | rasdani/github-patches | git_diff | pandas-dev__pandas-28230 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ASV Benchmark for read_excel
Right now we don't benchmark the performance of read_excel across the various engines (xlrd, openpyxl and odfpy), so we would gladly accept contributions to improve benchmark coverage on those.
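A rough sketch of the kind of ASV benchmark being asked for; the class and file names are illustrative rather than taken from the pandas benchmark suite, and an `odf` variant would additionally need a `.ods` fixture (for example generated with odfpy):
```python
import numpy as np
from pandas import DataFrame, date_range, read_excel

class TimeReadExcel:
    # ASV-style parametrisation over the reader engines
    params = ["xlrd", "openpyxl"]
    param_names = ["engine"]

    def setup(self, engine):
        df = DataFrame(
            np.random.randn(1000, 5),
            columns=["float{}".format(i) for i in range(5)],
            index=date_range("20000101", periods=1000, freq="H"),
        )
        self.fname = "bench_read_excel.xlsx"
        df.to_excel(self.fname, sheet_name="Sheet1")

    def time_read_excel(self, engine):
        read_excel(self.fname, engine=engine)
```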
</issue>
<code>
[start of asv_bench/benchmarks/io/excel.py]
1 from io import BytesIO
2
3 import numpy as np
4
5 from pandas import DataFrame, ExcelWriter, date_range, read_excel
6 import pandas.util.testing as tm
7
8
9 class Excel:
10
11 params = ["openpyxl", "xlsxwriter", "xlwt"]
12 param_names = ["engine"]
13
14 def setup(self, engine):
15 N = 2000
16 C = 5
17 self.df = DataFrame(
18 np.random.randn(N, C),
19 columns=["float{}".format(i) for i in range(C)],
20 index=date_range("20000101", periods=N, freq="H"),
21 )
22 self.df["object"] = tm.makeStringIndex(N)
23 self.bio_read = BytesIO()
24 self.writer_read = ExcelWriter(self.bio_read, engine=engine)
25 self.df.to_excel(self.writer_read, sheet_name="Sheet1")
26 self.writer_read.save()
27 self.bio_read.seek(0)
28
29 def time_read_excel(self, engine):
30 read_excel(self.bio_read)
31
32 def time_write_excel(self, engine):
33 bio_write = BytesIO()
34 bio_write.seek(0)
35 writer_write = ExcelWriter(bio_write, engine=engine)
36 self.df.to_excel(writer_write, sheet_name="Sheet1")
37 writer_write.save()
38
39
40 from ..pandas_vb_common import setup # noqa: F401 isort:skip
41
[end of asv_bench/benchmarks/io/excel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py
--- a/asv_bench/benchmarks/io/excel.py
+++ b/asv_bench/benchmarks/io/excel.py
@@ -1,40 +1,72 @@
from io import BytesIO
import numpy as np
+from odf.opendocument import OpenDocumentSpreadsheet
+from odf.table import Table, TableCell, TableRow
+from odf.text import P
from pandas import DataFrame, ExcelWriter, date_range, read_excel
import pandas.util.testing as tm
-class Excel:
+def _generate_dataframe():
+ N = 2000
+ C = 5
+ df = DataFrame(
+ np.random.randn(N, C),
+ columns=["float{}".format(i) for i in range(C)],
+ index=date_range("20000101", periods=N, freq="H"),
+ )
+ df["object"] = tm.makeStringIndex(N)
+ return df
+
+
+class WriteExcel:
params = ["openpyxl", "xlsxwriter", "xlwt"]
param_names = ["engine"]
def setup(self, engine):
- N = 2000
- C = 5
- self.df = DataFrame(
- np.random.randn(N, C),
- columns=["float{}".format(i) for i in range(C)],
- index=date_range("20000101", periods=N, freq="H"),
- )
- self.df["object"] = tm.makeStringIndex(N)
- self.bio_read = BytesIO()
- self.writer_read = ExcelWriter(self.bio_read, engine=engine)
- self.df.to_excel(self.writer_read, sheet_name="Sheet1")
- self.writer_read.save()
- self.bio_read.seek(0)
-
- def time_read_excel(self, engine):
- read_excel(self.bio_read)
+ self.df = _generate_dataframe()
def time_write_excel(self, engine):
- bio_write = BytesIO()
- bio_write.seek(0)
- writer_write = ExcelWriter(bio_write, engine=engine)
- self.df.to_excel(writer_write, sheet_name="Sheet1")
- writer_write.save()
+ bio = BytesIO()
+ bio.seek(0)
+ writer = ExcelWriter(bio, engine=engine)
+ self.df.to_excel(writer, sheet_name="Sheet1")
+ writer.save()
+
+
+class ReadExcel:
+
+ params = ["xlrd", "openpyxl", "odf"]
+ param_names = ["engine"]
+ fname_excel = "spreadsheet.xlsx"
+ fname_odf = "spreadsheet.ods"
+
+ def _create_odf(self):
+ doc = OpenDocumentSpreadsheet()
+ table = Table(name="Table1")
+ for row in self.df.values:
+ tr = TableRow()
+ for val in row:
+ tc = TableCell(valuetype="string")
+ tc.addElement(P(text=val))
+ tr.addElement(tc)
+ table.addElement(tr)
+
+ doc.spreadsheet.addElement(table)
+ doc.save(self.fname_odf)
+
+ def setup_cache(self):
+ self.df = _generate_dataframe()
+
+ self.df.to_excel(self.fname_excel, sheet_name="Sheet1")
+ self._create_odf()
+
+ def time_read_excel(self, engine):
+ fname = self.fname_odf if engine == "odf" else self.fname_excel
+ read_excel(fname, engine=engine)
from ..pandas_vb_common import setup # noqa: F401 isort:skip
| {"golden_diff": "diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py\n--- a/asv_bench/benchmarks/io/excel.py\n+++ b/asv_bench/benchmarks/io/excel.py\n@@ -1,40 +1,72 @@\n from io import BytesIO\n \n import numpy as np\n+from odf.opendocument import OpenDocumentSpreadsheet\n+from odf.table import Table, TableCell, TableRow\n+from odf.text import P\n \n from pandas import DataFrame, ExcelWriter, date_range, read_excel\n import pandas.util.testing as tm\n \n \n-class Excel:\n+def _generate_dataframe():\n+ N = 2000\n+ C = 5\n+ df = DataFrame(\n+ np.random.randn(N, C),\n+ columns=[\"float{}\".format(i) for i in range(C)],\n+ index=date_range(\"20000101\", periods=N, freq=\"H\"),\n+ )\n+ df[\"object\"] = tm.makeStringIndex(N)\n+ return df\n+\n+\n+class WriteExcel:\n \n params = [\"openpyxl\", \"xlsxwriter\", \"xlwt\"]\n param_names = [\"engine\"]\n \n def setup(self, engine):\n- N = 2000\n- C = 5\n- self.df = DataFrame(\n- np.random.randn(N, C),\n- columns=[\"float{}\".format(i) for i in range(C)],\n- index=date_range(\"20000101\", periods=N, freq=\"H\"),\n- )\n- self.df[\"object\"] = tm.makeStringIndex(N)\n- self.bio_read = BytesIO()\n- self.writer_read = ExcelWriter(self.bio_read, engine=engine)\n- self.df.to_excel(self.writer_read, sheet_name=\"Sheet1\")\n- self.writer_read.save()\n- self.bio_read.seek(0)\n-\n- def time_read_excel(self, engine):\n- read_excel(self.bio_read)\n+ self.df = _generate_dataframe()\n \n def time_write_excel(self, engine):\n- bio_write = BytesIO()\n- bio_write.seek(0)\n- writer_write = ExcelWriter(bio_write, engine=engine)\n- self.df.to_excel(writer_write, sheet_name=\"Sheet1\")\n- writer_write.save()\n+ bio = BytesIO()\n+ bio.seek(0)\n+ writer = ExcelWriter(bio, engine=engine)\n+ self.df.to_excel(writer, sheet_name=\"Sheet1\")\n+ writer.save()\n+\n+\n+class ReadExcel:\n+\n+ params = [\"xlrd\", \"openpyxl\", \"odf\"]\n+ param_names = [\"engine\"]\n+ fname_excel = \"spreadsheet.xlsx\"\n+ fname_odf = \"spreadsheet.ods\"\n+\n+ def _create_odf(self):\n+ doc = OpenDocumentSpreadsheet()\n+ table = Table(name=\"Table1\")\n+ for row in self.df.values:\n+ tr = TableRow()\n+ for val in row:\n+ tc = TableCell(valuetype=\"string\")\n+ tc.addElement(P(text=val))\n+ tr.addElement(tc)\n+ table.addElement(tr)\n+\n+ doc.spreadsheet.addElement(table)\n+ doc.save(self.fname_odf)\n+\n+ def setup_cache(self):\n+ self.df = _generate_dataframe()\n+\n+ self.df.to_excel(self.fname_excel, sheet_name=\"Sheet1\")\n+ self._create_odf()\n+\n+ def time_read_excel(self, engine):\n+ fname = self.fname_odf if engine == \"odf\" else self.fname_excel\n+ read_excel(fname, engine=engine)\n \n \n from ..pandas_vb_common import setup # noqa: F401 isort:skip\n", "issue": "ASV Benchmark for read_excel\nRight now we don't benchmark performance of read_excel across the various engines (xlrd, openpyxl and odfpy) so would gladly accept contributions to improve benchmark coverage on those\n", "before_files": [{"content": "from io import BytesIO\n\nimport numpy as np\n\nfrom pandas import DataFrame, ExcelWriter, date_range, read_excel\nimport pandas.util.testing as tm\n\n\nclass Excel:\n\n params = [\"openpyxl\", \"xlsxwriter\", \"xlwt\"]\n param_names = [\"engine\"]\n\n def setup(self, engine):\n N = 2000\n C = 5\n self.df = DataFrame(\n np.random.randn(N, C),\n columns=[\"float{}\".format(i) for i in range(C)],\n index=date_range(\"20000101\", periods=N, freq=\"H\"),\n )\n self.df[\"object\"] = tm.makeStringIndex(N)\n self.bio_read = BytesIO()\n self.writer_read = 
ExcelWriter(self.bio_read, engine=engine)\n self.df.to_excel(self.writer_read, sheet_name=\"Sheet1\")\n self.writer_read.save()\n self.bio_read.seek(0)\n\n def time_read_excel(self, engine):\n read_excel(self.bio_read)\n\n def time_write_excel(self, engine):\n bio_write = BytesIO()\n bio_write.seek(0)\n writer_write = ExcelWriter(bio_write, engine=engine)\n self.df.to_excel(writer_write, sheet_name=\"Sheet1\")\n writer_write.save()\n\n\nfrom ..pandas_vb_common import setup # noqa: F401 isort:skip\n", "path": "asv_bench/benchmarks/io/excel.py"}]} | 965 | 812 |
gh_patches_debug_27454 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4530 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CloudFormation EC2Credentials.py does evaluate if the UserData is base64 encoded
**Describe the issue**
The check EC2Credentials.py, which scans the UserData of a CloudFormation template for secrets, does not work if the user data is already base64 encoded.
The check is not returning the correct result and the processing time is very slow.
**Examples**
```yaml
AWSTemplateFormatVersion: "2010-09-09"
Resources:
Resource0:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-04169656fea786776
UserData: IyEgL2Jpbi9iYXNoCnN1ZG8gYXB0LWdldCB1cGRhdGUKc3VkbyBhcHQtZ2V0IGluc3RhbGwgLXkgYXBhY2hlMgpzdWRvIHN5c3RlbWN0bCBzdGFydCBhcGFjaGUyCnN1ZG8gc3lzdGVtY3RsIGVuYWJsZSBhcGFjaGUyCmV4cG9ydCBBV1NfQUNDRVNTX0tFWV9JRApleHBvcnQgQVdTX0FDQ0VTU19LRVlfSUQ9QUtJQUlPU0ZPRE5ON0VYQU1QTEUKZXhwb3J0IEFXU19TRUNSRVRfQUNDRVNTX0tFWT13SmFsclhVdG5GRU1JL0s3TURFTkcvYlB4UmZpQ1lFWEFNUExFS0VZCmV4cG9ydCBBV1NfREVGQVVMVF9SRUdJT049dXMtd2VzdC0yCmVjaG8gIjxoMT5EZXBsb3llZCB2aWEgVGVycmFmb3JtPC9oMT4iIHwgc3VkbyB0ZWUgL3Zhci93d3cvaHRtbC9pbmRleC5odG1s
```
**Version (please complete the following information):**
- Checkov Version [e.g. 22]
- >2.0.0
**Additional context**
Fix implemented here by trying to decode the base64 string.
https://github.com/j2clerck/checkov/commit/af9abd724520ec21ec5510dfc5db2ef83fd9e6dc
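The idea behind that commit, sketched here as a standalone helper (the helper name is illustrative; the real change lives in `EC2Credentials.scan_resource_conf`): try to base64-decode the user data first and fall back to the raw string when decoding fails, so the secret scan runs on readable text instead of a large base64 blob.
```python
from base64 import b64decode

def normalise_user_data(user_data):
    """Return decoded text when UserData is valid base64, else the raw value as str."""
    try:
        return b64decode(user_data).decode()
    except Exception:
        return str(user_data)
```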
</issue>
<code>
[start of checkov/cloudformation/checks/resource/aws/EC2Credentials.py]
1 from typing import List
2
3 from checkov.common.models.enums import CheckResult, CheckCategories
4 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
5 from checkov.common.util.secrets import get_secrets_from_string
6
7
8 class EC2Credentials(BaseResourceCheck):
9 def __init__(self):
10 name = "Ensure no hard-coded secrets exist in EC2 user data"
11 id = "CKV_AWS_46"
12 supported_resources = ['AWS::EC2::Instance']
13 categories = [CheckCategories.SECRETS]
14 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
15
16 def scan_resource_conf(self, conf):
17 if 'Properties' in conf.keys():
18 if 'UserData' in conf['Properties'].keys():
19 user_data = conf['Properties']['UserData']
20 # Cast to string as user data object can look slightly different depending
21 # on Yaml or JSON CF Templates and how the B64 conversion is done.
22 user_data_str = str(user_data)
23 if isinstance(user_data_str, str):
24 secrets = get_secrets_from_string(str(user_data_str))
25 if secrets:
26 for idx, secret in enumerate(secrets):
27 conf[f'{self.id}_secret_{idx}'] = secret
28 return CheckResult.FAILED
29 return CheckResult.PASSED
30
31 def get_evaluated_keys(self) -> List[str]:
32 return ["Properties/UserData"]
33
34
35 check = EC2Credentials()
36
[end of checkov/cloudformation/checks/resource/aws/EC2Credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py
--- a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py
+++ b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py
@@ -1,4 +1,5 @@
from typing import List
+from base64 import b64decode
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
@@ -17,9 +18,15 @@
if 'Properties' in conf.keys():
if 'UserData' in conf['Properties'].keys():
user_data = conf['Properties']['UserData']
- # Cast to string as user data object can look slightly different depending
- # on Yaml or JSON CF Templates and how the B64 conversion is done.
- user_data_str = str(user_data)
+ # In some case, the UserData might be a base64 encoded string which will slow down (3 minutes)
+ # the get_secrets_from_string function.
+ try:
+ user_data_str = b64decode(user_data).decode()
+ except Exception:
+ # Cast to string as user data object can look slightly different depending
+ # on Yaml or JSON CF Templates and how the B64 conversion is done.
+ user_data_str = str(user_data)
+
if isinstance(user_data_str, str):
secrets = get_secrets_from_string(str(user_data_str))
if secrets:
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n--- a/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n+++ b/checkov/cloudformation/checks/resource/aws/EC2Credentials.py\n@@ -1,4 +1,5 @@\n from typing import List\n+from base64 import b64decode\n \n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n@@ -17,9 +18,15 @@\n if 'Properties' in conf.keys():\n if 'UserData' in conf['Properties'].keys():\n user_data = conf['Properties']['UserData']\n- # Cast to string as user data object can look slightly different depending\n- # on Yaml or JSON CF Templates and how the B64 conversion is done.\n- user_data_str = str(user_data)\n+ # In some case, the UserData might be a base64 encoded string which will slow down (3 minutes)\n+ # the get_secrets_from_string function.\n+ try:\n+ user_data_str = b64decode(user_data).decode()\n+ except Exception:\n+ # Cast to string as user data object can look slightly different depending\n+ # on Yaml or JSON CF Templates and how the B64 conversion is done.\n+ user_data_str = str(user_data)\n+\n if isinstance(user_data_str, str):\n secrets = get_secrets_from_string(str(user_data_str))\n if secrets:\n", "issue": "CloudFormation EC2Credentials.py does evaluate if the UserData is base64 encoded\n**Describe the issue**\r\nThe check EC2Credentials.py which checks the UserData of a CloudFormation template against secrets does not work if the userdata are already encoded in base64. \r\nThe check is not returning the correct result and the processing time is very slow.\r\n\r\n**Examples**\r\n```yaml\r\nAWSTemplateFormatVersion: \"2010-09-09\"\r\nResources:\r\n Resource0:\r\n Type: AWS::EC2::Instance\r\n Properties:\r\n ImageId: ami-04169656fea786776\r\n UserData: IyEgL2Jpbi9iYXNoCnN1ZG8gYXB0LWdldCB1cGRhdGUKc3VkbyBhcHQtZ2V0IGluc3RhbGwgLXkgYXBhY2hlMgpzdWRvIHN5c3RlbWN0bCBzdGFydCBhcGFjaGUyCnN1ZG8gc3lzdGVtY3RsIGVuYWJsZSBhcGFjaGUyCmV4cG9ydCBBV1NfQUNDRVNTX0tFWV9JRApleHBvcnQgQVdTX0FDQ0VTU19LRVlfSUQ9QUtJQUlPU0ZPRE5ON0VYQU1QTEUKZXhwb3J0IEFXU19TRUNSRVRfQUNDRVNTX0tFWT13SmFsclhVdG5GRU1JL0s3TURFTkcvYlB4UmZpQ1lFWEFNUExFS0VZCmV4cG9ydCBBV1NfREVGQVVMVF9SRUdJT049dXMtd2VzdC0yCmVjaG8gIjxoMT5EZXBsb3llZCB2aWEgVGVycmFmb3JtPC9oMT4iIHwgc3VkbyB0ZWUgL3Zhci93d3cvaHRtbC9pbmRleC5odG1s\r\n\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version [e.g. 
22]\r\n - >2.0.0\r\n\r\n**Additional context**\r\nFix implemented here by trying to decode the base64 string.\r\nhttps://github.com/j2clerck/checkov/commit/af9abd724520ec21ec5510dfc5db2ef83fd9e6dc\r\n\n", "before_files": [{"content": "from typing import List\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.util.secrets import get_secrets_from_string\n\n\nclass EC2Credentials(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure no hard-coded secrets exist in EC2 user data\"\n id = \"CKV_AWS_46\"\n supported_resources = ['AWS::EC2::Instance']\n categories = [CheckCategories.SECRETS]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'Properties' in conf.keys():\n if 'UserData' in conf['Properties'].keys():\n user_data = conf['Properties']['UserData']\n # Cast to string as user data object can look slightly different depending\n # on Yaml or JSON CF Templates and how the B64 conversion is done.\n user_data_str = str(user_data)\n if isinstance(user_data_str, str):\n secrets = get_secrets_from_string(str(user_data_str))\n if secrets:\n for idx, secret in enumerate(secrets):\n conf[f'{self.id}_secret_{idx}'] = secret\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"Properties/UserData\"]\n\n\ncheck = EC2Credentials()\n", "path": "checkov/cloudformation/checks/resource/aws/EC2Credentials.py"}]} | 1,526 | 342 |
gh_patches_debug_1794 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4053 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Handle `normed` to `density` deprecations in `histogram`
## Description
See https://github.com/scikit-image/scikit-image/pull/3427 and the related issues.
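For context, a minimal illustration of the deprecation being tracked (assuming a current NumPy; the gallery example below still passes `normed=True` to `ax.hist`):
```python
import numpy as np

values = np.random.randint(0, 18, size=1000)
n_bins = int(values.max() + 1)
# deprecated spelling: np.histogram(values, bins=n_bins, normed=True)
hist, edges = np.histogram(values, bins=n_bins, range=(0, n_bins), density=True)
print(hist.sum() * np.diff(edges)[0])  # integrates to ~1.0 with density=True
```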
</issue>
<code>
[start of doc/examples/features_detection/plot_local_binary_pattern.py]
1 """
2 ===============================================
3 Local Binary Pattern for texture classification
4 ===============================================
5
6 In this example, we will see how to classify textures based on LBP (Local
7 Binary Pattern). LBP looks at points surrounding a central point and tests
8 whether the surrounding points are greater than or less than the central point
9 (i.e. gives a binary result).
10
11 Before trying out LBP on an image, it helps to look at a schematic of LBPs.
12 The below code is just used to plot the schematic.
13 """
14 import numpy as np
15 import matplotlib.pyplot as plt
16
17
18 METHOD = 'uniform'
19 plt.rcParams['font.size'] = 9
20
21
22 def plot_circle(ax, center, radius, color):
23 circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')
24 ax.add_patch(circle)
25
26
27 def plot_lbp_model(ax, binary_values):
28 """Draw the schematic for a local binary pattern."""
29 # Geometry spec
30 theta = np.deg2rad(45)
31 R = 1
32 r = 0.15
33 w = 1.5
34 gray = '0.5'
35
36 # Draw the central pixel.
37 plot_circle(ax, (0, 0), radius=r, color=gray)
38 # Draw the surrounding pixels.
39 for i, facecolor in enumerate(binary_values):
40 x = R * np.cos(i * theta)
41 y = R * np.sin(i * theta)
42 plot_circle(ax, (x, y), radius=r, color=str(facecolor))
43
44 # Draw the pixel grid.
45 for x in np.linspace(-w, w, 4):
46 ax.axvline(x, color=gray)
47 ax.axhline(x, color=gray)
48
49 # Tweak the layout.
50 ax.axis('image')
51 ax.axis('off')
52 size = w + 0.2
53 ax.set_xlim(-size, size)
54 ax.set_ylim(-size, size)
55
56
57 fig, axes = plt.subplots(ncols=5, figsize=(7, 2))
58
59 titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']
60
61 binary_patterns = [np.zeros(8),
62 np.ones(8),
63 np.hstack([np.ones(4), np.zeros(4)]),
64 np.hstack([np.zeros(3), np.ones(5)]),
65 [1, 0, 0, 1, 1, 1, 0, 0]]
66
67 for ax, values, name in zip(axes, binary_patterns, titles):
68 plot_lbp_model(ax, values)
69 ax.set_title(name)
70
71 ######################################################################
72 # The figure above shows example results with black (or white) representing
73 # pixels that are less (or more) intense than the central pixel. When
74 # surrounding pixels are all black or all white, then that image region is
75 # flat (i.e. featureless). Groups of continuous black or white pixels are
76 # considered "uniform" patterns that can be interpreted as corners or edges.
77 # If pixels switch back-and-forth between black and white pixels, the pattern
78 # is considered "non-uniform".
79 #
80 # When using LBP to detect texture, you measure a collection of LBPs over an
81 # image patch and look at the distribution of these LBPs. Lets apply LBP to a
82 # brick texture.
83
84 from skimage.transform import rotate
85 from skimage.feature import local_binary_pattern
86 from skimage import data
87 from skimage.color import label2rgb
88
89 # settings for LBP
90 radius = 3
91 n_points = 8 * radius
92
93
94 def overlay_labels(image, lbp, labels):
95 mask = np.logical_or.reduce([lbp == each for each in labels])
96 return label2rgb(mask, image=image, bg_label=0, alpha=0.5)
97
98
99 def highlight_bars(bars, indexes):
100 for i in indexes:
101 bars[i].set_facecolor('r')
102
103
104 image = data.brick()
105 lbp = local_binary_pattern(image, n_points, radius, METHOD)
106
107
108 def hist(ax, lbp):
109 n_bins = int(lbp.max() + 1)
110 return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
111 facecolor='0.5')
112
113
114 # plot histograms of LBP of textures
115 fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))
116 plt.gray()
117
118 titles = ('edge', 'flat', 'corner')
119 w = width = radius - 1
120 edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)
121 flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))
122 i_14 = n_points // 4 # 1/4th of the histogram
123 i_34 = 3 * (n_points // 4) # 3/4th of the histogram
124 corner_labels = (list(range(i_14 - w, i_14 + w + 1)) +
125 list(range(i_34 - w, i_34 + w + 1)))
126
127 label_sets = (edge_labels, flat_labels, corner_labels)
128
129 for ax, labels in zip(ax_img, label_sets):
130 ax.imshow(overlay_labels(image, lbp, labels))
131
132 for ax, labels, name in zip(ax_hist, label_sets, titles):
133 counts, _, bars = hist(ax, lbp)
134 highlight_bars(bars, labels)
135 ax.set_ylim(top=np.max(counts[:-1]))
136 ax.set_xlim(right=n_points + 2)
137 ax.set_title(name)
138
139 ax_hist[0].set_ylabel('Percentage')
140 for ax in ax_img:
141 ax.axis('off')
142
143
144 ######################################################################
145 # The above plot highlights flat, edge-like, and corner-like regions of the
146 # image.
147 #
148 # The histogram of the LBP result is a good measure to classify textures.
149 # Here, we test the histogram distributions against each other using the
150 # Kullback-Leibler-Divergence.
151
152 # settings for LBP
153 radius = 2
154 n_points = 8 * radius
155
156
157 def kullback_leibler_divergence(p, q):
158 p = np.asarray(p)
159 q = np.asarray(q)
160 filt = np.logical_and(p != 0, q != 0)
161 return np.sum(p[filt] * np.log2(p[filt] / q[filt]))
162
163
164 def match(refs, img):
165 best_score = 10
166 best_name = None
167 lbp = local_binary_pattern(img, n_points, radius, METHOD)
168 n_bins = int(lbp.max() + 1)
169 hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))
170 for name, ref in refs.items():
171 ref_hist, _ = np.histogram(ref, density=True, bins=n_bins,
172 range=(0, n_bins))
173 score = kullback_leibler_divergence(hist, ref_hist)
174 if score < best_score:
175 best_score = score
176 best_name = name
177 return best_name
178
179
180 brick = data.brick()
181 grass = data.grass()
182 gravel = data.gravel()
183
184 refs = {
185 'brick': local_binary_pattern(brick, n_points, radius, METHOD),
186 'grass': local_binary_pattern(grass, n_points, radius, METHOD),
187 'gravel': local_binary_pattern(gravel, n_points, radius, METHOD)
188 }
189
190 # classify rotated textures
191 print('Rotated images matched against references using LBP:')
192 print('original: brick, rotated: 30deg, match result: ',
193 match(refs, rotate(brick, angle=30, resize=False)))
194 print('original: brick, rotated: 70deg, match result: ',
195 match(refs, rotate(brick, angle=70, resize=False)))
196 print('original: grass, rotated: 145deg, match result: ',
197 match(refs, rotate(grass, angle=145, resize=False)))
198
199 # plot histograms of LBP of textures
200 fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,
201 figsize=(9, 6))
202 plt.gray()
203
204 ax1.imshow(brick)
205 ax1.axis('off')
206 hist(ax4, refs['brick'])
207 ax4.set_ylabel('Percentage')
208
209 ax2.imshow(grass)
210 ax2.axis('off')
211 hist(ax5, refs['grass'])
212 ax5.set_xlabel('Uniform LBP values')
213
214 ax3.imshow(gravel)
215 ax3.axis('off')
216 hist(ax6, refs['gravel'])
217
218 plt.show()
219
[end of doc/examples/features_detection/plot_local_binary_pattern.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/examples/features_detection/plot_local_binary_pattern.py b/doc/examples/features_detection/plot_local_binary_pattern.py
--- a/doc/examples/features_detection/plot_local_binary_pattern.py
+++ b/doc/examples/features_detection/plot_local_binary_pattern.py
@@ -107,7 +107,7 @@
def hist(ax, lbp):
n_bins = int(lbp.max() + 1)
- return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),
+ return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),
facecolor='0.5')
| {"golden_diff": "diff --git a/doc/examples/features_detection/plot_local_binary_pattern.py b/doc/examples/features_detection/plot_local_binary_pattern.py\n--- a/doc/examples/features_detection/plot_local_binary_pattern.py\n+++ b/doc/examples/features_detection/plot_local_binary_pattern.py\n@@ -107,7 +107,7 @@\n \n def hist(ax, lbp):\n n_bins = int(lbp.max() + 1)\n- return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),\n+ return ax.hist(lbp.ravel(), density=True, bins=n_bins, range=(0, n_bins),\n facecolor='0.5')\n", "issue": "Handle `normed` to `density` deprecations in `histogram` \n## Description\r\nSee https://github.com/scikit-image/scikit-image/pull/3427 and the related issues.\r\n\n", "before_files": [{"content": "\"\"\"\n===============================================\nLocal Binary Pattern for texture classification\n===============================================\n\nIn this example, we will see how to classify textures based on LBP (Local\nBinary Pattern). LBP looks at points surrounding a central point and tests\nwhether the surrounding points are greater than or less than the central point\n(i.e. gives a binary result).\n\nBefore trying out LBP on an image, it helps to look at a schematic of LBPs.\nThe below code is just used to plot the schematic.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nMETHOD = 'uniform'\nplt.rcParams['font.size'] = 9\n\n\ndef plot_circle(ax, center, radius, color):\n circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5')\n ax.add_patch(circle)\n\n\ndef plot_lbp_model(ax, binary_values):\n \"\"\"Draw the schematic for a local binary pattern.\"\"\"\n # Geometry spec\n theta = np.deg2rad(45)\n R = 1\n r = 0.15\n w = 1.5\n gray = '0.5'\n\n # Draw the central pixel.\n plot_circle(ax, (0, 0), radius=r, color=gray)\n # Draw the surrounding pixels.\n for i, facecolor in enumerate(binary_values):\n x = R * np.cos(i * theta)\n y = R * np.sin(i * theta)\n plot_circle(ax, (x, y), radius=r, color=str(facecolor))\n\n # Draw the pixel grid.\n for x in np.linspace(-w, w, 4):\n ax.axvline(x, color=gray)\n ax.axhline(x, color=gray)\n\n # Tweak the layout.\n ax.axis('image')\n ax.axis('off')\n size = w + 0.2\n ax.set_xlim(-size, size)\n ax.set_ylim(-size, size)\n\n\nfig, axes = plt.subplots(ncols=5, figsize=(7, 2))\n\ntitles = ['flat', 'flat', 'edge', 'corner', 'non-uniform']\n\nbinary_patterns = [np.zeros(8),\n np.ones(8),\n np.hstack([np.ones(4), np.zeros(4)]),\n np.hstack([np.zeros(3), np.ones(5)]),\n [1, 0, 0, 1, 1, 1, 0, 0]]\n\nfor ax, values, name in zip(axes, binary_patterns, titles):\n plot_lbp_model(ax, values)\n ax.set_title(name)\n\n######################################################################\n# The figure above shows example results with black (or white) representing\n# pixels that are less (or more) intense than the central pixel. When\n# surrounding pixels are all black or all white, then that image region is\n# flat (i.e. featureless). Groups of continuous black or white pixels are\n# considered \"uniform\" patterns that can be interpreted as corners or edges.\n# If pixels switch back-and-forth between black and white pixels, the pattern\n# is considered \"non-uniform\".\n#\n# When using LBP to detect texture, you measure a collection of LBPs over an\n# image patch and look at the distribution of these LBPs. 
Lets apply LBP to a\n# brick texture.\n\nfrom skimage.transform import rotate\nfrom skimage.feature import local_binary_pattern\nfrom skimage import data\nfrom skimage.color import label2rgb\n\n# settings for LBP\nradius = 3\nn_points = 8 * radius\n\n\ndef overlay_labels(image, lbp, labels):\n mask = np.logical_or.reduce([lbp == each for each in labels])\n return label2rgb(mask, image=image, bg_label=0, alpha=0.5)\n\n\ndef highlight_bars(bars, indexes):\n for i in indexes:\n bars[i].set_facecolor('r')\n\n\nimage = data.brick()\nlbp = local_binary_pattern(image, n_points, radius, METHOD)\n\n\ndef hist(ax, lbp):\n n_bins = int(lbp.max() + 1)\n return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins),\n facecolor='0.5')\n\n\n# plot histograms of LBP of textures\nfig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6))\nplt.gray()\n\ntitles = ('edge', 'flat', 'corner')\nw = width = radius - 1\nedge_labels = range(n_points // 2 - w, n_points // 2 + w + 1)\nflat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2))\ni_14 = n_points // 4 # 1/4th of the histogram\ni_34 = 3 * (n_points // 4) # 3/4th of the histogram\ncorner_labels = (list(range(i_14 - w, i_14 + w + 1)) +\n list(range(i_34 - w, i_34 + w + 1)))\n\nlabel_sets = (edge_labels, flat_labels, corner_labels)\n\nfor ax, labels in zip(ax_img, label_sets):\n ax.imshow(overlay_labels(image, lbp, labels))\n\nfor ax, labels, name in zip(ax_hist, label_sets, titles):\n counts, _, bars = hist(ax, lbp)\n highlight_bars(bars, labels)\n ax.set_ylim(top=np.max(counts[:-1]))\n ax.set_xlim(right=n_points + 2)\n ax.set_title(name)\n\nax_hist[0].set_ylabel('Percentage')\nfor ax in ax_img:\n ax.axis('off')\n\n\n######################################################################\n# The above plot highlights flat, edge-like, and corner-like regions of the\n# image.\n#\n# The histogram of the LBP result is a good measure to classify textures.\n# Here, we test the histogram distributions against each other using the\n# Kullback-Leibler-Divergence.\n\n# settings for LBP\nradius = 2\nn_points = 8 * radius\n\n\ndef kullback_leibler_divergence(p, q):\n p = np.asarray(p)\n q = np.asarray(q)\n filt = np.logical_and(p != 0, q != 0)\n return np.sum(p[filt] * np.log2(p[filt] / q[filt]))\n\n\ndef match(refs, img):\n best_score = 10\n best_name = None\n lbp = local_binary_pattern(img, n_points, radius, METHOD)\n n_bins = int(lbp.max() + 1)\n hist, _ = np.histogram(lbp, density=True, bins=n_bins, range=(0, n_bins))\n for name, ref in refs.items():\n ref_hist, _ = np.histogram(ref, density=True, bins=n_bins,\n range=(0, n_bins))\n score = kullback_leibler_divergence(hist, ref_hist)\n if score < best_score:\n best_score = score\n best_name = name\n return best_name\n\n\nbrick = data.brick()\ngrass = data.grass()\ngravel = data.gravel()\n\nrefs = {\n 'brick': local_binary_pattern(brick, n_points, radius, METHOD),\n 'grass': local_binary_pattern(grass, n_points, radius, METHOD),\n 'gravel': local_binary_pattern(gravel, n_points, radius, METHOD)\n}\n\n# classify rotated textures\nprint('Rotated images matched against references using LBP:')\nprint('original: brick, rotated: 30deg, match result: ',\n match(refs, rotate(brick, angle=30, resize=False)))\nprint('original: brick, rotated: 70deg, match result: ',\n match(refs, rotate(brick, angle=70, resize=False)))\nprint('original: grass, rotated: 145deg, match result: ',\n match(refs, rotate(grass, angle=145, resize=False)))\n\n# plot histograms of LBP of textures\nfig, ((ax1, 
ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3,\n figsize=(9, 6))\nplt.gray()\n\nax1.imshow(brick)\nax1.axis('off')\nhist(ax4, refs['brick'])\nax4.set_ylabel('Percentage')\n\nax2.imshow(grass)\nax2.axis('off')\nhist(ax5, refs['grass'])\nax5.set_xlabel('Uniform LBP values')\n\nax3.imshow(gravel)\nax3.axis('off')\nhist(ax6, refs['gravel'])\n\nplt.show()\n", "path": "doc/examples/features_detection/plot_local_binary_pattern.py"}]} | 3,027 | 142 |
gh_patches_debug_17541 | rasdani/github-patches | git_diff | liqd__a4-opin-284 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filter draft projects from all listings
Projects that are in draft mode should be removed from most listings:
- [x] latest projects for all users
- [x] other projects for all users
- [x] organisation page for non-initiators
</issue>
<code>
[start of euth/organisations/views.py]
1 from django.views import generic
2
3 from . import models
4
5
6 class OrganisationDetailView(generic.DetailView):
7 model = models.Organisation
8
9
10 class OrganisationListView(generic.ListView):
11 model = models.Organisation
12 paginate_by = 10
13
[end of euth/organisations/views.py]
[start of euth/projects/models.py]
1 from django.conf import settings
2 from django.db import models
3 from django.utils import functional, timezone
4
5 from contrib.transforms import html_transforms
6 from euth.contrib import base_models, validators
7 from euth.organisations import models as org_models
8
9
10 class ProjectManager(models.Manager):
11
12 def get_by_natural_key(self, name):
13 return self.get(name=name)
14
15 def featured(self):
16 return self.filter(is_draft=False).order_by('-created')[:8]
17
18
19 class Project(base_models.TimeStampedModel):
20 slug = models.SlugField(max_length=512, unique=True)
21 name = models.CharField(max_length=512)
22 organisation = models.ForeignKey(
23 org_models.Organisation, on_delete=models.CASCADE)
24 description = models.CharField(max_length=1024)
25 information = models.TextField()
26 is_public = models.BooleanField(default=True)
27 is_draft = models.BooleanField(default=True)
28 image = models.ImageField(
29 upload_to='projects/backgrounds',
30 blank=True,
31 validators=[validators.validate_hero_image])
32 participants = models.ManyToManyField(
33 settings.AUTH_USER_MODEL,
34 related_name='project_participant',
35 blank=True,
36 )
37 moderators = models.ManyToManyField(
38 settings.AUTH_USER_MODEL,
39 related_name='project_moderator'
40 )
41
42 objects = ProjectManager()
43
44 def __str__(self):
45 return self.name
46
47 def save(self, *args, **kwargs):
48 self.information = html_transforms.clean_html_field(
49 self.information)
50 super(Project, self).save(*args, **kwargs)
51
52 def get_absolute_url(self):
53 from django.core.urlresolvers import reverse
54 return reverse('project-detail', args=[str(self.slug)])
55
56 def has_member(self, user):
57 """
58 Everybody is member of all public projects and private projects can
59 be joined as moderator or participant.
60 """
61 return (
62 (user.is_authenticated() and self.is_public)
63 or (user in self.participants.all())
64 or (user in self.moderators.all())
65 )
66
67 @functional.cached_property
68 def other_projects(self):
69 other_projects = self.organisation.project_set.all().exclude(
70 slug=self.slug)
71 return other_projects
72
73 @functional.cached_property
74 def is_private(self):
75 return not self.is_public
76
77 @functional.cached_property
78 def active_phase(self):
79 from euth.phases import models as phase_models
80 return phase_models.Phase.objects\
81 .filter(module__project=self)\
82 .active_phases()\
83 .first()
84
85 @property
86 def days_left(self):
87 if self.active_phase:
88 today = timezone.now().replace(hour=0, minute=0, second=0)
89 time_delta = self.active_phase.end_date - today
90 return time_delta.days
91
[end of euth/projects/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/organisations/views.py b/euth/organisations/views.py
--- a/euth/organisations/views.py
+++ b/euth/organisations/views.py
@@ -6,6 +6,12 @@
class OrganisationDetailView(generic.DetailView):
model = models.Organisation
+ def visible_projects(self):
+ if self.request.user in self.object.initiators.all():
+ return self.object.project_set.all()
+ else:
+ return self.object.project_set.filter(is_draft=False)
+
class OrganisationListView(generic.ListView):
model = models.Organisation
diff --git a/euth/projects/models.py b/euth/projects/models.py
--- a/euth/projects/models.py
+++ b/euth/projects/models.py
@@ -66,8 +66,8 @@
@functional.cached_property
def other_projects(self):
- other_projects = self.organisation.project_set.all().exclude(
- slug=self.slug)
+ other_projects = self.organisation.project_set\
+ .filter(is_draft=False).exclude(slug=self.slug)
return other_projects
@functional.cached_property
| {"golden_diff": "diff --git a/euth/organisations/views.py b/euth/organisations/views.py\n--- a/euth/organisations/views.py\n+++ b/euth/organisations/views.py\n@@ -6,6 +6,12 @@\n class OrganisationDetailView(generic.DetailView):\n model = models.Organisation\n \n+ def visible_projects(self):\n+ if self.request.user in self.object.initiators.all():\n+ return self.object.project_set.all()\n+ else:\n+ return self.object.project_set.filter(is_draft=False)\n+\n \n class OrganisationListView(generic.ListView):\n model = models.Organisation\ndiff --git a/euth/projects/models.py b/euth/projects/models.py\n--- a/euth/projects/models.py\n+++ b/euth/projects/models.py\n@@ -66,8 +66,8 @@\n \n @functional.cached_property\n def other_projects(self):\n- other_projects = self.organisation.project_set.all().exclude(\n- slug=self.slug)\n+ other_projects = self.organisation.project_set\\\n+ .filter(is_draft=False).exclude(slug=self.slug)\n return other_projects\n \n @functional.cached_property\n", "issue": "Filter draft projects from all listings\nProjects that are in draft mode should be removed from most listings:\n- [x] latest projects for all users\n- [x] other projects for all users\n- [x] organisation page for non-initiators\n\n", "before_files": [{"content": "from django.views import generic\n\nfrom . import models\n\n\nclass OrganisationDetailView(generic.DetailView):\n model = models.Organisation\n\n\nclass OrganisationListView(generic.ListView):\n model = models.Organisation\n paginate_by = 10\n", "path": "euth/organisations/views.py"}, {"content": "from django.conf import settings\nfrom django.db import models\nfrom django.utils import functional, timezone\n\nfrom contrib.transforms import html_transforms\nfrom euth.contrib import base_models, validators\nfrom euth.organisations import models as org_models\n\n\nclass ProjectManager(models.Manager):\n\n def get_by_natural_key(self, name):\n return self.get(name=name)\n\n def featured(self):\n return self.filter(is_draft=False).order_by('-created')[:8]\n\n\nclass Project(base_models.TimeStampedModel):\n slug = models.SlugField(max_length=512, unique=True)\n name = models.CharField(max_length=512)\n organisation = models.ForeignKey(\n org_models.Organisation, on_delete=models.CASCADE)\n description = models.CharField(max_length=1024)\n information = models.TextField()\n is_public = models.BooleanField(default=True)\n is_draft = models.BooleanField(default=True)\n image = models.ImageField(\n upload_to='projects/backgrounds',\n blank=True,\n validators=[validators.validate_hero_image])\n participants = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_participant',\n blank=True,\n )\n moderators = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='project_moderator'\n )\n\n objects = ProjectManager()\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.information = html_transforms.clean_html_field(\n self.information)\n super(Project, self).save(*args, **kwargs)\n\n def get_absolute_url(self):\n from django.core.urlresolvers import reverse\n return reverse('project-detail', args=[str(self.slug)])\n\n def has_member(self, user):\n \"\"\"\n Everybody is member of all public projects and private projects can\n be joined as moderator or participant.\n \"\"\"\n return (\n (user.is_authenticated() and self.is_public)\n or (user in self.participants.all())\n or (user in self.moderators.all())\n )\n\n @functional.cached_property\n def other_projects(self):\n other_projects = 
self.organisation.project_set.all().exclude(\n slug=self.slug)\n return other_projects\n\n @functional.cached_property\n def is_private(self):\n return not self.is_public\n\n @functional.cached_property\n def active_phase(self):\n from euth.phases import models as phase_models\n return phase_models.Phase.objects\\\n .filter(module__project=self)\\\n .active_phases()\\\n .first()\n\n @property\n def days_left(self):\n if self.active_phase:\n today = timezone.now().replace(hour=0, minute=0, second=0)\n time_delta = self.active_phase.end_date - today\n return time_delta.days\n", "path": "euth/projects/models.py"}]} | 1,442 | 243 |
gh_patches_debug_28597 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NH: legislators scrape is failing
State: NH
Legislator scraper needs some attention
</issue>
<code>
[start of openstates/nh/legislators.py]
1 import re
2
3 from billy.scrape.legislators import LegislatorScraper, Legislator
4 from openstates.utils import LXMLMixin
5
6
7 class NHLegislatorScraper(LegislatorScraper, LXMLMixin):
8 jurisdiction = 'nh'
9 latest_only = True
10 members_url = 'http://www.gencourt.state.nh.us/downloads/Members.txt'
11
12 chamber_map = {'H': 'lower', 'S': 'upper'}
13 party_map = {
14 'D': 'Democratic',
15 'R': 'Republican',
16 'I': 'Independent',
17 'L': 'Libertarian',
18 }
19
20 def _get_photo(self, url, chamber):
21 """Attempts to find a portrait in the given legislator profile."""
22 doc = self.lxmlize(url)
23
24 if chamber == 'upper':
25 src = doc.xpath('//div[@id="page_content"]//img[contains(@src, '
26 '"images/senators") or contains(@src, "Senator")]/@src')
27 elif chamber == 'lower':
28 src = doc.xpath('//img[contains(@src, "images/memberpics")]/@src')
29
30 if src and 'nophoto' not in src[0]:
31 photo_url = src[0]
32 else:
33 photo_url = ''
34
35 return photo_url
36
37 def _parse_legislator(self, row, chamber, term):
38 # Capture legislator vitals.
39 first_name = row['FirstName']
40 middle_name = row['MiddleName']
41 last_name = row['lastname']
42 full_name = '{} {} {}'.format(first_name, middle_name, last_name)
43 full_name = re.sub(r'[\s]{2,}', ' ', full_name)
44
45 district = '{} {}'.format(row['county'], int(row['District'])).strip()
46 party = self.party_map[row['party']]
47 email = row['EMailAddress1']
48
49 legislator = Legislator(term, chamber, district, full_name,
50 first_name=first_name, last_name=last_name,
51 middle_name=middle_name, party=party,
52 email=email)
53
54 # Capture legislator office contact information.
55 district_address = '{}\n{}\n{}, {} {}'.format(row['street'],
56 row['address2'], row['city'], row['state'], row['zipcode']).strip()
57
58 legislator.add_office('district', 'Home Address',
59 address=district_address)
60
61 # Retrieve legislator portrait.
62 #profile_url = None
63 #if chamber == 'upper':
64 # profile_url = 'http://www.gencourt.state.nh.us/Senate/members/webpages/district{:02d}.aspx'.format(row['District'])
65 #elif chamber == 'lower':
66 # profile_url = 'http://www.gencourt.state.nh.us/house/members/member.aspx?member={}'.format(row['employee_no'])
67
68 #if profile_url:
69 # legislator['photo_url'] = self._get_photo(profile_url, chamber)
70 # legislator.add_source(profile_url)
71
72 return legislator
73
74 def _parse_members_txt(self):
75 lines = self.get(self.members_url).text.splitlines()
76
77 header = lines[0].split('\t')
78
79 for line in lines[1:]:
80 yield dict(zip(header, line.split('\t')))
81
82 def scrape(self, chamber, term):
83 for row in self._parse_members_txt():
84 if self.chamber_map[row['LegislativeBody']] == chamber:
85 leg = self._parse_legislator(row, chamber, term)
86 leg.add_source(self.members_url)
87 self.save_legislator(leg)
88
[end of openstates/nh/legislators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/nh/legislators.py b/openstates/nh/legislators.py
--- a/openstates/nh/legislators.py
+++ b/openstates/nh/legislators.py
@@ -38,13 +38,13 @@
# Capture legislator vitals.
first_name = row['FirstName']
middle_name = row['MiddleName']
- last_name = row['lastname']
+ last_name = row['LastName']
full_name = '{} {} {}'.format(first_name, middle_name, last_name)
full_name = re.sub(r'[\s]{2,}', ' ', full_name)
- district = '{} {}'.format(row['county'], int(row['District'])).strip()
- party = self.party_map[row['party']]
- email = row['EMailAddress1']
+ district = '{} {}'.format(row['County'], int(row['District'])).strip()
+ party = self.party_map[row['party'].upper()]
+ email = row['WorkEmail']
legislator = Legislator(term, chamber, district, full_name,
first_name=first_name, last_name=last_name,
@@ -52,8 +52,8 @@
email=email)
# Capture legislator office contact information.
- district_address = '{}\n{}\n{}, {} {}'.format(row['street'],
- row['address2'], row['city'], row['state'], row['zipcode']).strip()
+ district_address = '{}\n{}\n{}, {} {}'.format(row['Address'],
+ row['address2'], row['city'], row['State'], row['Zipcode']).strip()
legislator.add_office('district', 'Home Address',
address=district_address)
| {"golden_diff": "diff --git a/openstates/nh/legislators.py b/openstates/nh/legislators.py\n--- a/openstates/nh/legislators.py\n+++ b/openstates/nh/legislators.py\n@@ -38,13 +38,13 @@\n # Capture legislator vitals.\n first_name = row['FirstName']\n middle_name = row['MiddleName']\n- last_name = row['lastname']\n+ last_name = row['LastName']\n full_name = '{} {} {}'.format(first_name, middle_name, last_name)\n full_name = re.sub(r'[\\s]{2,}', ' ', full_name)\n \n- district = '{} {}'.format(row['county'], int(row['District'])).strip()\n- party = self.party_map[row['party']]\n- email = row['EMailAddress1']\n+ district = '{} {}'.format(row['County'], int(row['District'])).strip()\n+ party = self.party_map[row['party'].upper()]\n+ email = row['WorkEmail']\n \n legislator = Legislator(term, chamber, district, full_name,\n first_name=first_name, last_name=last_name,\n@@ -52,8 +52,8 @@\n email=email)\n \n # Capture legislator office contact information.\n- district_address = '{}\\n{}\\n{}, {} {}'.format(row['street'],\n- row['address2'], row['city'], row['state'], row['zipcode']).strip()\n+ district_address = '{}\\n{}\\n{}, {} {}'.format(row['Address'],\n+ row['address2'], row['city'], row['State'], row['Zipcode']).strip()\n \n legislator.add_office('district', 'Home Address',\n address=district_address)\n", "issue": "NH: legislators scrape is failing\nState: NH\r\n\r\nLegislator scraper needs some attention\n", "before_files": [{"content": "import re\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\n\nclass NHLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'nh'\n latest_only = True\n members_url = 'http://www.gencourt.state.nh.us/downloads/Members.txt'\n\n chamber_map = {'H': 'lower', 'S': 'upper'}\n party_map = {\n 'D': 'Democratic',\n 'R': 'Republican',\n 'I': 'Independent',\n 'L': 'Libertarian',\n }\n\n def _get_photo(self, url, chamber):\n \"\"\"Attempts to find a portrait in the given legislator profile.\"\"\"\n doc = self.lxmlize(url)\n\n if chamber == 'upper':\n src = doc.xpath('//div[@id=\"page_content\"]//img[contains(@src, '\n '\"images/senators\") or contains(@src, \"Senator\")]/@src')\n elif chamber == 'lower':\n src = doc.xpath('//img[contains(@src, \"images/memberpics\")]/@src')\n\n if src and 'nophoto' not in src[0]:\n photo_url = src[0]\n else:\n photo_url = ''\n\n return photo_url\n\n def _parse_legislator(self, row, chamber, term):\n # Capture legislator vitals.\n first_name = row['FirstName']\n middle_name = row['MiddleName']\n last_name = row['lastname']\n full_name = '{} {} {}'.format(first_name, middle_name, last_name)\n full_name = re.sub(r'[\\s]{2,}', ' ', full_name)\n\n district = '{} {}'.format(row['county'], int(row['District'])).strip()\n party = self.party_map[row['party']]\n email = row['EMailAddress1']\n\n legislator = Legislator(term, chamber, district, full_name,\n first_name=first_name, last_name=last_name,\n middle_name=middle_name, party=party,\n email=email)\n\n # Capture legislator office contact information.\n district_address = '{}\\n{}\\n{}, {} {}'.format(row['street'],\n row['address2'], row['city'], row['state'], row['zipcode']).strip()\n\n legislator.add_office('district', 'Home Address',\n address=district_address)\n\n # Retrieve legislator portrait.\n #profile_url = None\n #if chamber == 'upper':\n # profile_url = 'http://www.gencourt.state.nh.us/Senate/members/webpages/district{:02d}.aspx'.format(row['District'])\n #elif chamber == 'lower':\n # profile_url = 
'http://www.gencourt.state.nh.us/house/members/member.aspx?member={}'.format(row['employee_no'])\n\n #if profile_url:\n # legislator['photo_url'] = self._get_photo(profile_url, chamber)\n # legislator.add_source(profile_url)\n\n return legislator\n\n def _parse_members_txt(self):\n lines = self.get(self.members_url).text.splitlines()\n\n header = lines[0].split('\\t')\n\n for line in lines[1:]:\n yield dict(zip(header, line.split('\\t')))\n\n def scrape(self, chamber, term):\n for row in self._parse_members_txt():\n if self.chamber_map[row['LegislativeBody']] == chamber:\n leg = self._parse_legislator(row, chamber, term)\n leg.add_source(self.members_url)\n self.save_legislator(leg)\n", "path": "openstates/nh/legislators.py"}]} | 1,514 | 378 |
gh_patches_debug_25734 | rasdani/github-patches | git_diff | GPflow__GPflow-1446 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ChangePoints kernel breaks for len(X) != len(X2)
<!-- Lines like this are comments and will be invisible -->
# Bug
There seems to be a bug for models using the Changepoints kernel, whereby given a model
m =gpflow.models.GPR(data=(X,y), kernel=k, mean_function=None)
with k a Changepoint kernel, (using a combination of any base kernels), the model won't produce a value for predict_y.
<!-- A clear and concise description of what the bug is. -->
## To reproduce
**Minimal, reproducible example**
<!-- We need to be able to reproduce the bug by simply copy and pasting your code -->
```python
import numpy as np
import gpflow
X = np.linspace(0,100,100).reshape(100,1)
base_k1 = gpflow.kernels.Matern32(lengthscales=0.2)
base_k2 = gpflow.kernels.Matern32(lengthscales=2.0)
k = gpflow.kernels.ChangePoints([base_k1, base_k2], [0.0], steepness=5.0)
k(X) # works
N = 25 # anything other than N=100 will reproduce the bug
xx = np.linspace(0,50,N).reshape(N,1)
k(X, xx) # breaks
```
**Stack trace, or error message**
```
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-25-d1dbc7941bae> in <module>
----> 1 k(X, xx)
~/Code/GPflow/gpflow/kernels/base.py in __call__(self, X, X2, full_cov, presliced)
170
171 else:
--> 172 return self.K(X, X2)
173
174 def __add__(self, other):
~/Code/GPflow/gpflow/kernels/changepoints.py in K(self, X, X2)
83 N = tf.shape(X)[0]
84 ones = tf.ones((N, N, 1), dtype=X.dtype)
---> 85 starters = tf.concat([ones, starters], axis=2)
86 stoppers = tf.concat([stoppers, ones], axis=2)
87
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/util/dispatch.py in wrapper(*args, **kwargs)
178 """Call target, and fall back on dispatchers if there is a TypeError."""
179 try:
--> 180 return target(*args, **kwargs)
181 except (TypeError, ValueError):
182 # Note: convert_to_eager_tensor currently raises a ValueError, not a
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py in concat(values, axis, name)
1515 dtype=dtypes.int32).get_shape().assert_has_rank(0)
1516 return identity(values[0], name=name)
-> 1517 return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
1518
1519
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_array_ops.py in concat_v2(values, axis, name)
1116 pass # Add nodes to the TensorFlow graph.
1117 except _core._NotOkStatusException as e:
-> 1118 _ops.raise_from_not_ok_status(e, name)
1119 # Add nodes to the TensorFlow graph.
1120 if not isinstance(values, (list, tuple)):
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py in raise_from_not_ok_status(e, name)
6604 message = e.message + (" name: " + name if name is not None else "")
6605 # pylint: disable=protected-access
-> 6606 six.raise_from(core._status_to_exception(e.code, message), None)
6607 # pylint: enable=protected-access
6608
~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/six.py in raise_from(value, from_value)
InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [100,100,1] vs. shape[1] = [100,25,1] [Op:ConcatV2] name: concat
```
## Expected behavior
Should return the kernel matrix of shape (100, 25).
## System information
* GPflow version: 2.0.1 (reproduced on `develop`)
* GPflow installed from: 'pip install gpflow'
* TensorFlow version: 2.1.0
* Python version 3.7.5
* Operating system Ubuntu Linux (18.04.3)
## Additional context
For me changing these two lines in changepoints.py or kernels did the trick:
```python
def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:
sig_X = self._sigmoids(X) # N x 1 x Ncp
sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X
# `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go
# from 1 -> 0, dimensions are N x N x Ncp
starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))
stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))
# prepend `starters` with ones and append ones to `stoppers` since the
# first kernel has no start and the last kernel has no end
N = tf.shape(X)[0]
M = tf.shape(X2)[0] if X2 is not None else N # THIS IS THE FIX
ones = tf.ones((N, M, 1), dtype=X.dtype) #PREVIOUSLY N WAS IN PLACE OF M HERE
starters = tf.concat([ones, starters], axis=2)
stoppers = tf.concat([stoppers, ones], axis=2)
```
However I have not run any kind of tests to make sure this would not break anything else (neither have I added a unit test for the changepoint kernel). It could be that I am using the changepoint kernel incorrectly, in which case it would be very helpful if a short demo of using the kernel in a model could be appended to the changepoint kernel notebook :)
</issue>
<code>
[start of gpflow/kernels/changepoints.py]
1 from collections.abc import Iterable
2 from typing import List, Optional, Union
3
4 import tensorflow as tf
5
6 from ..base import Parameter
7 from ..utilities import positive
8 from .base import Combination, Kernel
9
10
11 class ChangePoints(Combination):
12 r"""
13 The ChangePoints kernel defines a fixed number of change-points along a 1d
14 input space where different kernels govern different parts of the space.
15
16 The kernel is by multiplication and addition of the base kernels with
17 sigmoid functions (σ). A single change-point kernel is defined as:
18
19 K₁(x, x') * (1 - σ(x)) * (1 - σ(x')) + K₂(x, x') * σ(x) * σ(x')
20
21 where K₁ is deactivated around the change-point and K₂ is activated. The
22 single change-point version can be found in \citet{lloyd2014}. Each sigmoid
23 is a logistic function defined as:
24
25 σ(x) = 1 / (1 + exp{-s(x - x₀)})
26
27 parameterized by location "x₀" and steepness "s".
28
29 @incollection{lloyd2014,
30 author = {Lloyd, James Robert et al},
31 title = {Automatic Construction and Natural-language Description of Nonparametric Regression Models},
32 booktitle = {Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence},
33 year = {2014},
34 url = {http://dl.acm.org/citation.cfm?id=2893873.2894066},
35 }
36 """
37
38 def __init__(
39 self,
40 kernels: List[Kernel],
41 locations: List[float],
42 steepness: Union[float, List[float]] = 1.0,
43 name: Optional[str] = None,
44 ):
45 """
46 :param kernels: list of kernels defining the different regimes
47 :param locations: list of change-point locations in the 1d input space
48 :param steepness: the steepness parameter(s) of the sigmoids, this can be
49 common between them or decoupled
50 """
51 if len(kernels) != len(locations) + 1:
52 raise ValueError(
53 "Number of kernels ({nk}) must be one more than the number of "
54 "changepoint locations ({nl})".format(nk=len(kernels), nl=len(locations))
55 )
56
57 if isinstance(steepness, Iterable) and len(steepness) != len(locations):
58 raise ValueError(
59 "Dimension of steepness ({ns}) does not match number of changepoint "
60 "locations ({nl})".format(ns=len(steepness), nl=len(locations))
61 )
62
63 super().__init__(kernels, name=name)
64
65 self.locations = Parameter(locations)
66 self.steepness = Parameter(steepness, transform=positive())
67
68 def _set_kernels(self, kernels: List[Kernel]):
69 # it is not clear how to flatten out nested change-points
70 self.kernels = kernels
71
72 def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:
73 sig_X = self._sigmoids(X) # N x 1 x Ncp
74 sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X
75
76 # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go
77 # from 1 -> 0, dimensions are N x N x Ncp
78 starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))
79 stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))
80
81 # prepend `starters` with ones and append ones to `stoppers` since the
82 # first kernel has no start and the last kernel has no end
83 N = tf.shape(X)[0]
84 ones = tf.ones((N, N, 1), dtype=X.dtype)
85 starters = tf.concat([ones, starters], axis=2)
86 stoppers = tf.concat([stoppers, ones], axis=2)
87
88 # now combine with the underlying kernels
89 kernel_stack = tf.stack([k(X, X2) for k in self.kernels], axis=2)
90 return tf.reduce_sum(kernel_stack * starters * stoppers, axis=2)
91
92 def K_diag(self, X: tf.Tensor) -> tf.Tensor:
93 N = tf.shape(X)[0]
94 sig_X = tf.reshape(self._sigmoids(X), (N, -1)) # N x Ncp
95
96 ones = tf.ones((N, 1), dtype=X.dtype)
97 starters = tf.concat([ones, sig_X * sig_X], axis=1) # N x Ncp
98 stoppers = tf.concat([(1 - sig_X) * (1 - sig_X), ones], axis=1)
99
100 kernel_stack = tf.stack([k(X, full_cov=False) for k in self.kernels], axis=1)
101 return tf.reduce_sum(kernel_stack * starters * stoppers, axis=1)
102
103 def _sigmoids(self, X: tf.Tensor) -> tf.Tensor:
104 locations = tf.sort(self.locations) # ensure locations are ordered
105 locations = tf.reshape(locations, (1, 1, -1))
106 steepness = tf.reshape(self.steepness, (1, 1, -1))
107 return tf.sigmoid(steepness * (X[:, :, None] - locations))
108
[end of gpflow/kernels/changepoints.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpflow/kernels/changepoints.py b/gpflow/kernels/changepoints.py
--- a/gpflow/kernels/changepoints.py
+++ b/gpflow/kernels/changepoints.py
@@ -70,18 +70,19 @@
self.kernels = kernels
def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:
- sig_X = self._sigmoids(X) # N x 1 x Ncp
- sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X
+ sig_X = self._sigmoids(X) # N1 x 1 x Ncp
+ sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X # N2 x 1 x Ncp
# `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go
- # from 1 -> 0, dimensions are N x N x Ncp
+ # from 1 -> 0, dimensions are N1 x N2 x Ncp
starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))
stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))
# prepend `starters` with ones and append ones to `stoppers` since the
# first kernel has no start and the last kernel has no end
- N = tf.shape(X)[0]
- ones = tf.ones((N, N, 1), dtype=X.dtype)
+ N1 = tf.shape(X)[0]
+ N2 = tf.shape(X2)[0] if X2 is not None else N1
+ ones = tf.ones((N1, N2, 1), dtype=X.dtype)
starters = tf.concat([ones, starters], axis=2)
stoppers = tf.concat([stoppers, ones], axis=2)
| {"golden_diff": "diff --git a/gpflow/kernels/changepoints.py b/gpflow/kernels/changepoints.py\n--- a/gpflow/kernels/changepoints.py\n+++ b/gpflow/kernels/changepoints.py\n@@ -70,18 +70,19 @@\n self.kernels = kernels\n \n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:\n- sig_X = self._sigmoids(X) # N x 1 x Ncp\n- sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X\n+ sig_X = self._sigmoids(X) # N1 x 1 x Ncp\n+ sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X # N2 x 1 x Ncp\n \n # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go\n- # from 1 -> 0, dimensions are N x N x Ncp\n+ # from 1 -> 0, dimensions are N1 x N2 x Ncp\n starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))\n stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))\n \n # prepend `starters` with ones and append ones to `stoppers` since the\n # first kernel has no start and the last kernel has no end\n- N = tf.shape(X)[0]\n- ones = tf.ones((N, N, 1), dtype=X.dtype)\n+ N1 = tf.shape(X)[0]\n+ N2 = tf.shape(X2)[0] if X2 is not None else N1\n+ ones = tf.ones((N1, N2, 1), dtype=X.dtype)\n starters = tf.concat([ones, starters], axis=2)\n stoppers = tf.concat([stoppers, ones], axis=2)\n", "issue": "ChangePoints kernel breaks for len(X) != len(X2)\n<!-- Lines like this are comments and will be invisible -->\r\n\r\n# Bug \r\nThere seems to be a bug for models using the Changepoints kernel, whereby given a model \r\nm =gpflow.models.GPR(data=(X,y), kernel=k, mean_function=None)\r\nwith k a Changepoint kernel, (using a combination of any base kernels), the model won't produce a value for predict_y. \r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## To reproduce\r\n\r\n**Minimal, reproducible example**\r\n<!-- We need to be able to reproduce the bug by simply copy and pasting your code -->\r\n```python\r\nimport numpy as np\r\nimport gpflow\r\nX = np.linspace(0,100,100).reshape(100,1)\r\nbase_k1 = gpflow.kernels.Matern32(lengthscales=0.2)\r\nbase_k2 = gpflow.kernels.Matern32(lengthscales=2.0)\r\nk = gpflow.kernels.ChangePoints([base_k1, base_k2], [0.0], steepness=5.0)\r\nk(X) # works\r\n\r\nN = 25 # anything other than N=100 will reproduce the bug\r\nxx = np.linspace(0,50,N).reshape(N,1)\r\nk(X, xx) # breaks\r\n```\r\n\r\n**Stack trace, or error message**\r\n```\r\n---------------------------------------------------------------------------\r\nInvalidArgumentError Traceback (most recent call last)\r\n<ipython-input-25-d1dbc7941bae> in <module>\r\n----> 1 k(X, xx)\r\n\r\n~/Code/GPflow/gpflow/kernels/base.py in __call__(self, X, X2, full_cov, presliced)\r\n 170 \r\n 171 else:\r\n--> 172 return self.K(X, X2)\r\n 173 \r\n 174 def __add__(self, other):\r\n\r\n~/Code/GPflow/gpflow/kernels/changepoints.py in K(self, X, X2)\r\n 83 N = tf.shape(X)[0]\r\n 84 ones = tf.ones((N, N, 1), dtype=X.dtype)\r\n---> 85 starters = tf.concat([ones, starters], axis=2)\r\n 86 stoppers = tf.concat([stoppers, ones], axis=2)\r\n 87 \r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/util/dispatch.py in wrapper(*args, **kwargs)\r\n 178 \"\"\"Call target, and fall back on dispatchers if there is a TypeError.\"\"\"\r\n 179 try:\r\n--> 180 return target(*args, **kwargs)\r\n 181 except (TypeError, ValueError):\r\n 182 # Note: convert_to_eager_tensor currently raises a ValueError, not a\r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py in concat(values, axis, name)\r\n 1515 
dtype=dtypes.int32).get_shape().assert_has_rank(0)\r\n 1516 return identity(values[0], name=name)\r\n-> 1517 return gen_array_ops.concat_v2(values=values, axis=axis, name=name)\r\n 1518 \r\n 1519 \r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_array_ops.py in concat_v2(values, axis, name)\r\n 1116 pass # Add nodes to the TensorFlow graph.\r\n 1117 except _core._NotOkStatusException as e:\r\n-> 1118 _ops.raise_from_not_ok_status(e, name)\r\n 1119 # Add nodes to the TensorFlow graph.\r\n 1120 if not isinstance(values, (list, tuple)):\r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py in raise_from_not_ok_status(e, name)\r\n 6604 message = e.message + (\" name: \" + name if name is not None else \"\")\r\n 6605 # pylint: disable=protected-access\r\n-> 6606 six.raise_from(core._status_to_exception(e.code, message), None)\r\n 6607 # pylint: enable=protected-access\r\n 6608 \r\n\r\n~/anaconda3/envs/gpflux2/lib/python3.7/site-packages/six.py in raise_from(value, from_value)\r\n\r\nInvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [100,100,1] vs. shape[1] = [100,25,1] [Op:ConcatV2] name: concat\r\n```\r\n\r\n## Expected behavior\r\n\r\nShould return the kernel matrix of shape (100, 25).\r\n\r\n## System information\r\n\r\n* GPflow version: 2.0.1 (reproduced on `develop`)\r\n* GPflow installed from: 'pip install gpflow'\r\n* TensorFlow version: 2.1.0\r\n* Python version 3.7.5\r\n* Operating system Ubuntu Linux (18.04.3)\r\n\r\n## Additional context\r\n\r\nFor me changing these two lines in changepoints.py or kernels did the trick: \r\n\r\n```python\r\n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:\r\n sig_X = self._sigmoids(X) # N x 1 x Ncp\r\n sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X\r\n\r\n # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go\r\n # from 1 -> 0, dimensions are N x N x Ncp\r\n starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))\r\n stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))\r\n\r\n # prepend `starters` with ones and append ones to `stoppers` since the\r\n # first kernel has no start and the last kernel has no end\r\n N = tf.shape(X)[0]\r\n M = tf.shape(X2)[0] if X2 is not None else N # THIS IS THE FIX\r\n ones = tf.ones((N, M, 1), dtype=X.dtype) #PREVIOUSLY N WAS IN PLACE OF M HERE\r\n starters = tf.concat([ones, starters], axis=2)\r\n stoppers = tf.concat([stoppers, ones], axis=2)\r\n```\r\n\r\nHowever I have not run any kind of tests to make sure this would not break anything else (neither have I added a unit test for the changepoint kernel). It could be that I am using the changepoint kernel incorrectly, in which case it would be very helpful if a short demo of using the kernel in a model could be appended to the changepoint kernel notebook :)\r\n\n", "before_files": [{"content": "from collections.abc import Iterable\nfrom typing import List, Optional, Union\n\nimport tensorflow as tf\n\nfrom ..base import Parameter\nfrom ..utilities import positive\nfrom .base import Combination, Kernel\n\n\nclass ChangePoints(Combination):\n r\"\"\"\n The ChangePoints kernel defines a fixed number of change-points along a 1d\n input space where different kernels govern different parts of the space.\n\n The kernel is by multiplication and addition of the base kernels with\n sigmoid functions (\u03c3). 
A single change-point kernel is defined as:\n\n K\u2081(x, x') * (1 - \u03c3(x)) * (1 - \u03c3(x')) + K\u2082(x, x') * \u03c3(x) * \u03c3(x')\n\n where K\u2081 is deactivated around the change-point and K\u2082 is activated. The\n single change-point version can be found in \\citet{lloyd2014}. Each sigmoid\n is a logistic function defined as:\n\n \u03c3(x) = 1 / (1 + exp{-s(x - x\u2080)})\n\n parameterized by location \"x\u2080\" and steepness \"s\".\n\n @incollection{lloyd2014,\n author = {Lloyd, James Robert et al},\n title = {Automatic Construction and Natural-language Description of Nonparametric Regression Models},\n booktitle = {Proceedings of the Twenty-Eighth AAAI Conference on Artificial Intelligence},\n year = {2014},\n url = {http://dl.acm.org/citation.cfm?id=2893873.2894066},\n }\n \"\"\"\n\n def __init__(\n self,\n kernels: List[Kernel],\n locations: List[float],\n steepness: Union[float, List[float]] = 1.0,\n name: Optional[str] = None,\n ):\n \"\"\"\n :param kernels: list of kernels defining the different regimes\n :param locations: list of change-point locations in the 1d input space\n :param steepness: the steepness parameter(s) of the sigmoids, this can be\n common between them or decoupled\n \"\"\"\n if len(kernels) != len(locations) + 1:\n raise ValueError(\n \"Number of kernels ({nk}) must be one more than the number of \"\n \"changepoint locations ({nl})\".format(nk=len(kernels), nl=len(locations))\n )\n\n if isinstance(steepness, Iterable) and len(steepness) != len(locations):\n raise ValueError(\n \"Dimension of steepness ({ns}) does not match number of changepoint \"\n \"locations ({nl})\".format(ns=len(steepness), nl=len(locations))\n )\n\n super().__init__(kernels, name=name)\n\n self.locations = Parameter(locations)\n self.steepness = Parameter(steepness, transform=positive())\n\n def _set_kernels(self, kernels: List[Kernel]):\n # it is not clear how to flatten out nested change-points\n self.kernels = kernels\n\n def K(self, X: tf.Tensor, X2: Optional[tf.Tensor] = None) -> tf.Tensor:\n sig_X = self._sigmoids(X) # N x 1 x Ncp\n sig_X2 = self._sigmoids(X2) if X2 is not None else sig_X\n\n # `starters` are the sigmoids going from 0 -> 1, whilst `stoppers` go\n # from 1 -> 0, dimensions are N x N x Ncp\n starters = sig_X * tf.transpose(sig_X2, perm=(1, 0, 2))\n stoppers = (1 - sig_X) * tf.transpose((1 - sig_X2), perm=(1, 0, 2))\n\n # prepend `starters` with ones and append ones to `stoppers` since the\n # first kernel has no start and the last kernel has no end\n N = tf.shape(X)[0]\n ones = tf.ones((N, N, 1), dtype=X.dtype)\n starters = tf.concat([ones, starters], axis=2)\n stoppers = tf.concat([stoppers, ones], axis=2)\n\n # now combine with the underlying kernels\n kernel_stack = tf.stack([k(X, X2) for k in self.kernels], axis=2)\n return tf.reduce_sum(kernel_stack * starters * stoppers, axis=2)\n\n def K_diag(self, X: tf.Tensor) -> tf.Tensor:\n N = tf.shape(X)[0]\n sig_X = tf.reshape(self._sigmoids(X), (N, -1)) # N x Ncp\n\n ones = tf.ones((N, 1), dtype=X.dtype)\n starters = tf.concat([ones, sig_X * sig_X], axis=1) # N x Ncp\n stoppers = tf.concat([(1 - sig_X) * (1 - sig_X), ones], axis=1)\n\n kernel_stack = tf.stack([k(X, full_cov=False) for k in self.kernels], axis=1)\n return tf.reduce_sum(kernel_stack * starters * stoppers, axis=1)\n\n def _sigmoids(self, X: tf.Tensor) -> tf.Tensor:\n locations = tf.sort(self.locations) # ensure locations are ordered\n locations = tf.reshape(locations, (1, 1, -1))\n steepness = tf.reshape(self.steepness, (1, 1, -1))\n return 
tf.sigmoid(steepness * (X[:, :, None] - locations))\n", "path": "gpflow/kernels/changepoints.py"}]} | 3,547 | 462 |
gh_patches_debug_2764 | rasdani/github-patches | git_diff | ivy-llc__ivy-18208 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
expand
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/manipulation.py]
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 )
10
11
12 @to_ivy_arrays_and_back
13 def reshape(x, shape):
14 return ivy.reshape(x, shape)
15
16
17 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
18 @to_ivy_arrays_and_back
19 def abs(x, name=None):
20 return ivy.abs(x)
21
22
23 absolute = abs
24
25
26 @to_ivy_arrays_and_back
27 def stack(x, axis=0, name=None):
28 return ivy.stack(x, axis=axis)
29
30
31 @with_unsupported_dtypes({"2.5.0 and below": ("int8", "int16")}, "paddle")
32 @to_ivy_arrays_and_back
33 def concat(x, axis, name=None):
34 return ivy.concat(x, axis=axis)
35
36
37 @with_unsupported_dtypes(
38 {"2.5.0 and below": ("int8", "uint8", "int16", "float16")},
39 "paddle",
40 )
41 @to_ivy_arrays_and_back
42 def tile(x, repeat_times, name=None):
43 return ivy.tile(x, repeats=repeat_times)
44
45
46 @with_unsupported_dtypes(
47 {"2.5.0 and below": ("int16", "complex64", "complex128")},
48 "paddle",
49 )
50 @to_ivy_arrays_and_back
51 def split(x, num_or_sections, axis=0, name=None):
52 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
53
54
55 @with_unsupported_dtypes(
56 {"2.5.0 and below": ("float16", "bfloat16", "int8", "int16")},
57 "paddle",
58 )
59 @to_ivy_arrays_and_back
60 def squeeze(x, axis=None, name=None):
61 return ivy.squeeze(x, axis=axis)
62
63
64 @with_supported_dtypes(
65 {
66 "2.5.0 and below": (
67 "bool",
68 "float16",
69 "float32",
70 "float64",
71 "int32",
72 "int64",
73 "uint8",
74 )
75 },
76 "paddle",
77 )
78 @to_ivy_arrays_and_back
79 def cast(x, dtype):
80 return ivy.astype(x, dtype)
81
[end of ivy/functional/frontends/paddle/tensor/manipulation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py
--- a/ivy/functional/frontends/paddle/tensor/manipulation.py
+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py
@@ -61,6 +61,15 @@
return ivy.squeeze(x, axis=axis)
+@with_supported_dtypes(
+ {"2.5.0 and below": ("bool", "float32", "float64", "int32", "int64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def expand(x, shape, name=None):
+ return ivy.expand(x, shape)
+
+
@with_supported_dtypes(
{
"2.5.0 and below": (
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/manipulation.py b/ivy/functional/frontends/paddle/tensor/manipulation.py\n--- a/ivy/functional/frontends/paddle/tensor/manipulation.py\n+++ b/ivy/functional/frontends/paddle/tensor/manipulation.py\n@@ -61,6 +61,15 @@\n return ivy.squeeze(x, axis=axis)\n \n \n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def expand(x, shape, name=None):\n+ return ivy.expand(x, shape)\n+\n+\n @with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n", "issue": "expand\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape):\n return ivy.reshape(x, shape)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\nabsolute = abs\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n", "path": "ivy/functional/frontends/paddle/tensor/manipulation.py"}]} | 1,257 | 192 |
gh_patches_debug_8251 | rasdani/github-patches | git_diff | rootpy__rootpy-489 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
List index out of range
I'm in ipython, the latest released rootpy, and I get this when I try to access a TTree called `composed`.
``` ipython
In [9]: f.composed
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-9-b49340cac05a> in <module>()
----> 1 f.composed
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in __getattr__(self, attr)
89 in Get this can end up in an "infinite" recursion and stack overflow
90 """
---> 91 return self.Get(attr)
92
93 def __getitem__(self, name):
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in get(self, name, **kwargs)
52 thing = _dir.Get(path, **kwargs)
53 else:
---> 54 thing = f(self, _name, **kwargs)
55 if isinstance(thing, _DirectoryBase):
56 thing._parent = self
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in Get(self, name, **kwargs)
118 if not thing:
119 raise DoesNotExist
--> 120 return asrootpy(thing, **kwargs)
121
122 def GetRaw(self, name):
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in asrootpy(thing, **kwargs)
113
114 thing_cls = thing.__class__
--> 115 rootpy_cls = lookup(thing_cls)
116 if rootpy_cls is None:
117 log.warn("a subclass of %s is not implemented in rootpy" %
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup(cls)
131
132 cls_name = cls.__name__
--> 133 return lookup_by_name(cls_name)
134
135
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup_by_name(cls_name)
149 path, rootpy_cls_name = '.'.join(path_tokens[:-1]), path_tokens[-1]
150 rootpy_module = __import__(
--> 151 path, globals(), locals(), [rootpy_cls_name], -1)
152 rootpy_cls = getattr(rootpy_module, rootpy_cls_name)
153 if dynamic_kwargs is not None:
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/__init__.py in <module>()
3 from .. import log; log = log[__name__]
4
----> 5 from .buffer import TreeBuffer
6 from .tree import Tree
7 from .model import TreeModel
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/buffer.py in <module>()
10 from .. import create
11 from ..core import _resetable_mixin, _copy_construct_mixin
---> 12 from .. import stl
13
14
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/stl.py in <module>()
43 from .defaults import extra_initialization
44 from .util.cpp import CPPGrammar
---> 45 from . import compiled
46 from . import userdata
47 from . import lookup_by_name, register, QROOT
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/compiled/__init__.py in <module>()
16 import ROOT
17
---> 18 import rootpy.userdata as userdata
19
20 from .. import log; log = log[__name__]
/home/pwaller/.local/lib/python2.7/site-packages/rootpy/userdata.py in <module>()
40 DATA_ROOT = CONFIG_ROOT = None
41 if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and
---> 42 not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):
43 DATA_ROOT = ensure_directory('ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')
44 CONFIG_ROOT = ensure_directory('ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')
IndexError: list index out of range
In [10]: import sys
In [11]: sys.argv
Out[11]: []
```
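The traceback bottoms out in `rootpy/userdata.py`, which indexes `sys.argv[0]` unconditionally; in an embedded interpreter session `sys.argv` can be empty, so the lookup raises `IndexError` before the module finishes importing. A minimal defensive sketch of the idea is below — an illustration only, not necessarily the fix the project adopted:

```python
import os
import sys

# Illustrative guard: only inspect sys.argv[0] when it actually exists;
# embedded interpreters (like this IPython session) may leave sys.argv empty.
in_nosetests = bool(sys.argv) and sys.argv[0].endswith('nosetests')
grid_mode = os.getenv('ROOTPY_GRIDMODE') in ('1', 'true')

if os.getenv('DEBUG', None) or not (grid_mode or in_nosetests):
    pass  # create DATA_ROOT / CONFIG_ROOT as the module normally would
```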
</issue>
<code>
[start of rootpy/userdata.py]
1 # Copyright 2012 the rootpy developers
2 # distributed under the terms of the GNU General Public License
3 """
4 This module handles creation of the user-data area
5 """
6 from __future__ import absolute_import
7
8 import os
9 import sys
10 import tempfile
11 import atexit
12 from os.path import expanduser, expandvars, exists, isdir, join as pjoin
13 from platform import machine
14
15 from . import log; log = log[__name__]
16 from . import QROOT
17 from .defaults import extra_initialization
18
19 __all__ = [
20 'DATA_ROOT',
21 'CONFIG_ROOT',
22 'BINARY_PATH',
23 'ARCH',
24 ]
25
26 if "XDG_CONFIG_HOME" not in os.environ:
27 os.environ["XDG_CONFIG_HOME"] = expanduser('~/.config')
28 if "XDG_CACHE_HOME" not in os.environ:
29 os.environ["XDG_CACHE_HOME"] = expanduser('~/.cache')
30
31
32 def ensure_directory(variable, default):
33 path = os.getenv(variable)
34 if path is None:
35 path = expandvars(default)
36 else:
37 path = expandvars(expanduser(path))
38
39 # check if expanduser failed:
40 if path.startswith('~'):
41 path = None
42 elif not exists(path):
43 os.makedirs(path)
44 elif not isdir(path):
45 # A file at path already exists
46 path = None
47 return path
48
49
50 DATA_ROOT = CONFIG_ROOT = None
51 if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and
52 not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):
53 DATA_ROOT = ensure_directory(
54 'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')
55 CONFIG_ROOT = ensure_directory(
56 'ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')
57
58 if DATA_ROOT is None:
59 log.info("Placing user data in /tmp.")
60 log.warning(
61 "Make sure '~/.cache/rootpy' or $ROOTPY_DATA is a writable "
62 "directory so that it isn't necessary to recreate all user "
63 "data each time")
64
65 DATA_ROOT = tempfile.mkdtemp()
66
67 @atexit.register
68 def __cleanup():
69 import shutil
70 shutil.rmtree(DATA_ROOT)
71
72 BINARY_PATH = None
73
74 ARCH = "{0}-{1}".format(machine(), QROOT.gROOT.GetVersionInt())
75 if BINARY_PATH is None:
76 BINARY_PATH = pjoin(DATA_ROOT, ARCH)
77
78
79 @extra_initialization
80 def show_binary_path():
81 log.debug("Using binary path: {0}".format(BINARY_PATH))
82
[end of rootpy/userdata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rootpy/userdata.py b/rootpy/userdata.py
--- a/rootpy/userdata.py
+++ b/rootpy/userdata.py
@@ -48,8 +48,14 @@
DATA_ROOT = CONFIG_ROOT = None
-if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and
- not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):
+
+in_nosetests = False
+if sys.argv and sys.argv[0].endswith('nosetests'):
+ in_nosetests = True
+
+grid_mode = os.getenv('ROOTPY_GRIDMODE') in ('1', 'true')
+
+if (os.getenv('DEBUG', None) or not (grid_mode or in_nosetests)):
DATA_ROOT = ensure_directory(
'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')
CONFIG_ROOT = ensure_directory(
| {"golden_diff": "diff --git a/rootpy/userdata.py b/rootpy/userdata.py\n--- a/rootpy/userdata.py\n+++ b/rootpy/userdata.py\n@@ -48,8 +48,14 @@\n \n \n DATA_ROOT = CONFIG_ROOT = None\n-if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and\n- not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):\n+\n+in_nosetests = False\n+if sys.argv and sys.argv[0].endswith('nosetests'):\n+ in_nosetests = True\n+\n+grid_mode = os.getenv('ROOTPY_GRIDMODE') in ('1', 'true')\n+\n+if (os.getenv('DEBUG', None) or not (grid_mode or in_nosetests)):\n DATA_ROOT = ensure_directory(\n 'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')\n CONFIG_ROOT = ensure_directory(\n", "issue": "List index out of range\nI'm in ipython, the latest released rootpy, and I get this when I try to access a TTree called `composed`.\n\n``` ipython\nIn [9]: f.composed\n---------------------------------------------------------------------------\nIndexError Traceback (most recent call last)\n<ipython-input-9-b49340cac05a> in <module>()\n----> 1 f.composed\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in __getattr__(self, attr)\n 89 in Get this can end up in an \"infinite\" recursion and stack overflow\n 90 \"\"\"\n---> 91 return self.Get(attr)\n 92 \n 93 def __getitem__(self, name):\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in get(self, name, **kwargs)\n 52 thing = _dir.Get(path, **kwargs)\n 53 else:\n---> 54 thing = f(self, _name, **kwargs)\n 55 if isinstance(thing, _DirectoryBase):\n 56 thing._parent = self\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/io/file.py in Get(self, name, **kwargs)\n 118 if not thing:\n 119 raise DoesNotExist\n--> 120 return asrootpy(thing, **kwargs)\n 121 \n 122 def GetRaw(self, name):\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in asrootpy(thing, **kwargs)\n 113 \n 114 thing_cls = thing.__class__\n--> 115 rootpy_cls = lookup(thing_cls)\n 116 if rootpy_cls is None:\n 117 log.warn(\"a subclass of %s is not implemented in rootpy\" %\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup(cls)\n 131 \n 132 cls_name = cls.__name__\n--> 133 return lookup_by_name(cls_name)\n 134 \n 135 \n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/__init__.py in lookup_by_name(cls_name)\n 149 path, rootpy_cls_name = '.'.join(path_tokens[:-1]), path_tokens[-1]\n 150 rootpy_module = __import__(\n--> 151 path, globals(), locals(), [rootpy_cls_name], -1)\n 152 rootpy_cls = getattr(rootpy_module, rootpy_cls_name)\n 153 if dynamic_kwargs is not None:\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/__init__.py in <module>()\n 3 from .. import log; log = log[__name__]\n 4 \n----> 5 from .buffer import TreeBuffer\n 6 from .tree import Tree\n 7 from .model import TreeModel\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/tree/buffer.py in <module>()\n 10 from .. import create\n 11 from ..core import _resetable_mixin, _copy_construct_mixin\n---> 12 from .. import stl\n 13 \n 14 \n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/stl.py in <module>()\n 43 from .defaults import extra_initialization\n 44 from .util.cpp import CPPGrammar\n---> 45 from . import compiled\n 46 from . import userdata\n 47 from . import lookup_by_name, register, QROOT\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/compiled/__init__.py in <module>()\n 16 import ROOT\n 17 \n---> 18 import rootpy.userdata as userdata\n 19 \n 20 from .. 
import log; log = log[__name__]\n\n/home/pwaller/.local/lib/python2.7/site-packages/rootpy/userdata.py in <module>()\n 40 DATA_ROOT = CONFIG_ROOT = None\n 41 if (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and\n---> 42 not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):\n 43 DATA_ROOT = ensure_directory('ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')\n 44 CONFIG_ROOT = ensure_directory('ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')\n\nIndexError: list index out of range\n\nIn [10]: import sys\n\nIn [11]: sys.argv\nOut[11]: []\n```\n\n", "before_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\nThis module handles creation of the user-data area\n\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport tempfile\nimport atexit\nfrom os.path import expanduser, expandvars, exists, isdir, join as pjoin\nfrom platform import machine\n\nfrom . import log; log = log[__name__]\nfrom . import QROOT\nfrom .defaults import extra_initialization\n\n__all__ = [\n 'DATA_ROOT',\n 'CONFIG_ROOT',\n 'BINARY_PATH',\n 'ARCH',\n]\n\nif \"XDG_CONFIG_HOME\" not in os.environ:\n os.environ[\"XDG_CONFIG_HOME\"] = expanduser('~/.config')\nif \"XDG_CACHE_HOME\" not in os.environ:\n os.environ[\"XDG_CACHE_HOME\"] = expanduser('~/.cache')\n\n\ndef ensure_directory(variable, default):\n path = os.getenv(variable)\n if path is None:\n path = expandvars(default)\n else:\n path = expandvars(expanduser(path))\n\n # check if expanduser failed:\n if path.startswith('~'):\n path = None\n elif not exists(path):\n os.makedirs(path)\n elif not isdir(path):\n # A file at path already exists\n path = None\n return path\n\n\nDATA_ROOT = CONFIG_ROOT = None\nif (os.getenv('ROOTPY_GRIDMODE') not in ('1', 'true') and\n not sys.argv[0].endswith('nosetests')) or os.getenv('DEBUG', None):\n DATA_ROOT = ensure_directory(\n 'ROOTPY_DATA', '${XDG_CACHE_HOME}/rootpy')\n CONFIG_ROOT = ensure_directory(\n 'ROOTPY_CONFIG', '${XDG_CONFIG_HOME}/rootpy')\n\nif DATA_ROOT is None:\n log.info(\"Placing user data in /tmp.\")\n log.warning(\n \"Make sure '~/.cache/rootpy' or $ROOTPY_DATA is a writable \"\n \"directory so that it isn't necessary to recreate all user \"\n \"data each time\")\n\n DATA_ROOT = tempfile.mkdtemp()\n\n @atexit.register\n def __cleanup():\n import shutil\n shutil.rmtree(DATA_ROOT)\n\nBINARY_PATH = None\n\nARCH = \"{0}-{1}\".format(machine(), QROOT.gROOT.GetVersionInt())\nif BINARY_PATH is None:\n BINARY_PATH = pjoin(DATA_ROOT, ARCH)\n\n\n@extra_initialization\ndef show_binary_path():\n log.debug(\"Using binary path: {0}\".format(BINARY_PATH))\n", "path": "rootpy/userdata.py"}]} | 2,333 | 206 |
gh_patches_debug_7504 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3
4 from types import MethodType
5 from typing import Optional, Tuple
6
7 import torch
8 import torch.nn.functional as F
9 from transformers.models.llama.modeling_llama import (
10 LlamaRMSNorm,
11 LlamaAttention,
12 LlamaModel,
13 LlamaForCausalLM,
14 apply_rotary_pos_emb,
15 repeat_kv,
16 )
17
18 from colossalai.logging import get_dist_logger
19 from einops import rearrange
20
21 from flash_attn.bert_padding import pad_input, unpad_input
22 from flash_attn.flash_attn_interface import (
23 flash_attn_func,
24 flash_attn_varlen_kvpacked_func,
25 )
26 from flash_attn.ops.rms_norm import rms_norm
27
28
29 logger = get_dist_logger()
30
31
32 def _prepare_decoder_attention_mask(
33 self: LlamaModel,
34 attention_mask: torch.BoolTensor,
35 input_shape: torch.Size,
36 inputs_embeds: torch.Tensor,
37 past_key_values_length: int,
38 ) -> Optional[torch.Tensor]:
39 """
40 Decoder attetion mask
41 """
42 if past_key_values_length > 0 and attention_mask is not None:
43 attention_mask = torch.cat(
44 tensors=(
45 torch.full(
46 size=(input_shape[0], past_key_values_length),
47 fill_value=True,
48 dtype=attention_mask.dtype,
49 device=attention_mask.device,
50 ),
51 attention_mask,
52 ),
53 dim=-1,
54 ) # (bsz, past_key_values_length + q_len)
55 if attention_mask is not None and torch.all(attention_mask):
56 return None # Faster
57 return attention_mask
58
59
60 def attention_forward(
61 self: LlamaAttention,
62 hidden_states: torch.Tensor,
63 attention_mask: Optional[torch.Tensor] = None,
64 position_ids: Optional[torch.LongTensor] = None,
65 past_key_value: Optional[Tuple[torch.Tensor]] = None,
66 output_attentions: bool = False,
67 use_cache: bool = False,
68 ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
69 """
70 Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.
71 """
72 if output_attentions:
73 logger.warning(
74 "Argument `output_attentions` is not supported for flash-attention patched `LlamaAttention`, "
75 "return `None` instead."
76 )
77
78 bsz, q_len, _ = hidden_states.size()
79
80 if self.config.pretraining_tp > 1:
81 q_slicing, kv_slicing = (
82 dim // self.config.pretraining_tp
83 for dim in (
84 self.num_heads * self.head_dim,
85 self.num_key_value_heads * self.head_dim,
86 )
87 ) # `Tuple[int, int]`
88 q_slices, k_slices, v_slices = (
89 proj.weight.split(slicing, dim=0)
90 for proj, slicing in (
91 (self.q_proj, q_slicing),
92 (self.k_proj, kv_slicing),
93 (self.v_proj, kv_slicing),
94 )
95 ) # Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor], Tuple[torch.Tensor]]
96 q, k, v = (
97 torch.cat(
98 [F.linear(hidden_states, slices[i]) for i in range(self.config.pretraining_tp)],
99 dim=-1,
100 )
101 for slices in (q_slices, k_slices, v_slices)
102 )
103 # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:
104 # (bsz, q_len, num_heads * head_dim),
105 # (bsz, q_len, num_key_value_heads * head_dim),
106 # (bsz, q_len, num_key_value_heads * head_dim)
107 else:
108 q, k, v = (proj(hidden_states) for proj in (self.q_proj, self.k_proj, self.v_proj))
109 # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:
110 # (bsz, q_len, num_heads * head_dim),
111 # (bsz, q_len, num_key_value_heads * head_dim),
112 # (bsz, q_len, num_key_value_heads * head_dim)
113
114 # (bsz, q_len, num_heads * head_dim) -> (bsz, num_heads, q_len, head_dim);
115 # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim);
116 # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim)
117 q, k, v = (
118 states.view(bsz, q_len, num_heads, self.head_dim).transpose(1, 2)
119 for states, num_heads in (
120 (q, self.num_heads),
121 (k, self.num_key_value_heads),
122 (v, self.num_key_value_heads),
123 )
124 )
125 kv_len = k.shape[-2] # initially, `kv_len` == `q_len`
126 past_kv_len = 0
127 if past_key_value is not None:
128 # if `past_key_value` is not None, `kv_len` > `q_len`.
129 past_kv_len = past_key_value[0].shape[-2]
130 kv_len += past_kv_len
131
132 # two `torch.Tensor` objs of shape (1, 1, kv_len, head_dim)
133 cos, sin = self.rotary_emb(v, seq_len=kv_len)
134 # (bsz, num_heads, q_len, head_dim), (bsz, num_key_value_heads, q_len, head_dim)
135 q, k = apply_rotary_pos_emb(q=q, k=k, cos=cos, sin=sin, position_ids=position_ids)
136 if past_key_value is not None:
137 # reuse k, v, self_attention
138 k = torch.cat([past_key_value[0], k], dim=2)
139 v = torch.cat([past_key_value[1], v], dim=2)
140
141 past_key_value = (k, v) if use_cache else None
142
143 # repeat k/v heads if n_kv_heads < n_heads
144 k = repeat_kv(hidden_states=k, n_rep=self.num_key_value_groups)
145 # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)
146 v = repeat_kv(hidden_states=v, n_rep=self.num_key_value_groups)
147 # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)
148
149 key_padding_mask = attention_mask
150 # (bsz, num_heads, q_len, head_dim) -> (bsz, q_len, num_heads, head_dim)
151 q, k, v = (states.transpose(1, 2) for states in (q, k, v))
152
153 if past_kv_len > 0:
154 q = torch.cat(
155 tensors=(
156 torch.full(
157 size=(bsz, past_kv_len, self.num_heads, self.head_dim),
158 fill_value=0.0,
159 dtype=q.dtype,
160 device=q.device,
161 ),
162 q,
163 ),
164 dim=1,
165 ) # (bsz, past_kv_len + q_len, num_heads, head_dim)
166
167 if key_padding_mask is None:
168 # (bsz, past_kv_len + q_len, num_heads, head_dim)
169 output = flash_attn_func(q=q, k=k, v=v, dropout_p=0.0, softmax_scale=None, causal=True) # (bsz, )
170 output = rearrange(output, pattern="... h d -> ... (h d)") # (bsz, past_kv_len + q_len, num_heads * head_dim)
171 else:
172 q, indices, cu_q_lens, max_q_len = unpad_input(hidden_states=q, attention_mask=key_padding_mask)
173 kv, _, cu_kv_lens, max_kv_len = unpad_input(
174 hidden_states=torch.stack(tensors=(k, v), dim=2),
175 attention_mask=key_padding_mask,
176 )
177 output_unpad = flash_attn_varlen_kvpacked_func(
178 q=q,
179 kv=kv,
180 cu_seqlens_q=cu_q_lens,
181 cu_seqlens_k=cu_kv_lens,
182 max_seqlen_q=max_q_len,
183 max_seqlen_k=max_kv_len,
184 dropout_p=0.0,
185 softmax_scale=None,
186 causal=True,
187 )
188 output = pad_input(
189 hidden_states=rearrange(output_unpad, pattern="nnz h d -> nnz (h d)"),
190 indices=indices,
191 batch=bsz,
192 seqlen=past_kv_len + q_len,
193 ) # (bsz, past_kv_len + q_len, num_heads * head_dim)
194
195 if past_kv_len > 0:
196 # Strip off the zero query outputs.
197 output = output[:, past_kv_len:, ...] # (bsz, q_len, num_heads * head_dim)
198 output = self.o_proj(output) # (bsz, q_len, hidden_size)
199 return output, None, past_key_value
200
201
202 def rms_norm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor) -> torch.Tensor:
203 """
204 Formard function for RMS Norm
205 """
206 return rms_norm(x=hidden_states, weight=self.weight, epsilon=self.variance_epsilon)
207
208
209 def replace_with_flash_attention(model: LlamaForCausalLM) -> None:
210 for name, module in model.named_modules():
211 if isinstance(module, LlamaAttention):
212 module.forward = MethodType(attention_forward, module)
213 if isinstance(module, LlamaModel):
214 module._prepare_decoder_attention_mask = MethodType(_prepare_decoder_attention_mask, module)
215 if isinstance(module, LlamaRMSNorm):
216 module.forward = MethodType(rms_norm_forward, module)
217
[end of applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py
--- a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py
+++ b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py
@@ -65,6 +65,7 @@
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: bool = False,
use_cache: bool = False,
+ **kwargs
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""
Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.
| {"golden_diff": "diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py\n--- a/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py\n+++ b/applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py\n@@ -65,6 +65,7 @@\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n+ **kwargs\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"\n Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom types import MethodType\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom transformers.models.llama.modeling_llama import (\n LlamaRMSNorm,\n LlamaAttention,\n LlamaModel,\n LlamaForCausalLM,\n apply_rotary_pos_emb,\n repeat_kv,\n)\n\nfrom colossalai.logging import get_dist_logger\nfrom einops import rearrange\n\nfrom flash_attn.bert_padding import pad_input, unpad_input\nfrom flash_attn.flash_attn_interface import (\n flash_attn_func,\n flash_attn_varlen_kvpacked_func,\n)\nfrom flash_attn.ops.rms_norm import rms_norm\n\n\nlogger = get_dist_logger()\n\n\ndef _prepare_decoder_attention_mask(\n self: LlamaModel,\n attention_mask: torch.BoolTensor,\n input_shape: torch.Size,\n inputs_embeds: torch.Tensor,\n past_key_values_length: int,\n) -> Optional[torch.Tensor]:\n \"\"\"\n Decoder attetion mask\n \"\"\"\n if past_key_values_length > 0 and attention_mask is not None:\n attention_mask = torch.cat(\n tensors=(\n torch.full(\n size=(input_shape[0], past_key_values_length),\n fill_value=True,\n dtype=attention_mask.dtype,\n device=attention_mask.device,\n ),\n attention_mask,\n ),\n dim=-1,\n ) # (bsz, past_key_values_length + q_len)\n if attention_mask is not None and torch.all(attention_mask):\n return None # Faster\n return attention_mask\n\n\ndef attention_forward(\n self: LlamaAttention,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"\n Re-define LLaMA-2 `LlamaAttention` forward method using flash-attention.\n \"\"\"\n if output_attentions:\n logger.warning(\n \"Argument `output_attentions` is not supported for flash-attention patched `LlamaAttention`, \"\n \"return `None` instead.\"\n )\n\n bsz, q_len, _ = hidden_states.size()\n\n if self.config.pretraining_tp > 1:\n q_slicing, kv_slicing = (\n dim // self.config.pretraining_tp\n for dim in (\n self.num_heads * self.head_dim,\n self.num_key_value_heads * self.head_dim,\n )\n ) # `Tuple[int, int]`\n q_slices, k_slices, v_slices = (\n proj.weight.split(slicing, dim=0)\n for proj, slicing in (\n (self.q_proj, q_slicing),\n (self.k_proj, kv_slicing),\n (self.v_proj, kv_slicing),\n )\n ) # Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor], Tuple[torch.Tensor]]\n q, k, v = (\n torch.cat(\n [F.linear(hidden_states, slices[i]) for i in range(self.config.pretraining_tp)],\n dim=-1,\n )\n for slices in (q_slices, k_slices, v_slices)\n )\n # `Tuple[torch.Tensor, 
torch.Tensor, torch.Tensor]` of shape:\n # (bsz, q_len, num_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim)\n else:\n q, k, v = (proj(hidden_states) for proj in (self.q_proj, self.k_proj, self.v_proj))\n # `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]` of shape:\n # (bsz, q_len, num_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim),\n # (bsz, q_len, num_key_value_heads * head_dim)\n\n # (bsz, q_len, num_heads * head_dim) -> (bsz, num_heads, q_len, head_dim);\n # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim);\n # (bsz, q_len, num_key_value_heads * head_dim) -> (bsz, num_key_value_heads, q_len, head_dim)\n q, k, v = (\n states.view(bsz, q_len, num_heads, self.head_dim).transpose(1, 2)\n for states, num_heads in (\n (q, self.num_heads),\n (k, self.num_key_value_heads),\n (v, self.num_key_value_heads),\n )\n )\n kv_len = k.shape[-2] # initially, `kv_len` == `q_len`\n past_kv_len = 0\n if past_key_value is not None:\n # if `past_key_value` is not None, `kv_len` > `q_len`.\n past_kv_len = past_key_value[0].shape[-2]\n kv_len += past_kv_len\n\n # two `torch.Tensor` objs of shape (1, 1, kv_len, head_dim)\n cos, sin = self.rotary_emb(v, seq_len=kv_len)\n # (bsz, num_heads, q_len, head_dim), (bsz, num_key_value_heads, q_len, head_dim)\n q, k = apply_rotary_pos_emb(q=q, k=k, cos=cos, sin=sin, position_ids=position_ids)\n if past_key_value is not None:\n # reuse k, v, self_attention\n k = torch.cat([past_key_value[0], k], dim=2)\n v = torch.cat([past_key_value[1], v], dim=2)\n\n past_key_value = (k, v) if use_cache else None\n\n # repeat k/v heads if n_kv_heads < n_heads\n k = repeat_kv(hidden_states=k, n_rep=self.num_key_value_groups)\n # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)\n v = repeat_kv(hidden_states=v, n_rep=self.num_key_value_groups)\n # (bsz, num_key_value_heads, q_len, head_dim) -> (bsz, num_heads, q_len, head_dim)\n\n key_padding_mask = attention_mask\n # (bsz, num_heads, q_len, head_dim) -> (bsz, q_len, num_heads, head_dim)\n q, k, v = (states.transpose(1, 2) for states in (q, k, v))\n\n if past_kv_len > 0:\n q = torch.cat(\n tensors=(\n torch.full(\n size=(bsz, past_kv_len, self.num_heads, self.head_dim),\n fill_value=0.0,\n dtype=q.dtype,\n device=q.device,\n ),\n q,\n ),\n dim=1,\n ) # (bsz, past_kv_len + q_len, num_heads, head_dim)\n\n if key_padding_mask is None:\n # (bsz, past_kv_len + q_len, num_heads, head_dim)\n output = flash_attn_func(q=q, k=k, v=v, dropout_p=0.0, softmax_scale=None, causal=True) # (bsz, )\n output = rearrange(output, pattern=\"... h d -> ... (h d)\") # (bsz, past_kv_len + q_len, num_heads * head_dim)\n else:\n q, indices, cu_q_lens, max_q_len = unpad_input(hidden_states=q, attention_mask=key_padding_mask)\n kv, _, cu_kv_lens, max_kv_len = unpad_input(\n hidden_states=torch.stack(tensors=(k, v), dim=2),\n attention_mask=key_padding_mask,\n )\n output_unpad = flash_attn_varlen_kvpacked_func(\n q=q,\n kv=kv,\n cu_seqlens_q=cu_q_lens,\n cu_seqlens_k=cu_kv_lens,\n max_seqlen_q=max_q_len,\n max_seqlen_k=max_kv_len,\n dropout_p=0.0,\n softmax_scale=None,\n causal=True,\n )\n output = pad_input(\n hidden_states=rearrange(output_unpad, pattern=\"nnz h d -> nnz (h d)\"),\n indices=indices,\n batch=bsz,\n seqlen=past_kv_len + q_len,\n ) # (bsz, past_kv_len + q_len, num_heads * head_dim)\n\n if past_kv_len > 0:\n # Strip off the zero query outputs.\n output = output[:, past_kv_len:, ...] 
# (bsz, q_len, num_heads * head_dim)\n output = self.o_proj(output) # (bsz, q_len, hidden_size)\n return output, None, past_key_value\n\n\ndef rms_norm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Formard function for RMS Norm\n \"\"\"\n return rms_norm(x=hidden_states, weight=self.weight, epsilon=self.variance_epsilon)\n\n\ndef replace_with_flash_attention(model: LlamaForCausalLM) -> None:\n for name, module in model.named_modules():\n if isinstance(module, LlamaAttention):\n module.forward = MethodType(attention_forward, module)\n if isinstance(module, LlamaModel):\n module._prepare_decoder_attention_mask = MethodType(_prepare_decoder_attention_mask, module)\n if isinstance(module, LlamaRMSNorm):\n module.forward = MethodType(rms_norm_forward, module)\n", "path": "applications/Colossal-LLaMA-2/colossal_llama2/utils/flash_attention_patch.py"}]} | 3,286 | 196 |
gh_patches_debug_7373 | rasdani/github-patches | git_diff | wagtail__wagtail-1274 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AdminAutoHeightTextInput doesn't auto-height on page load
`wagtailadmin.widgets.AdminAutoHeightTextInput` appears now to be the new default widget for `django.db.models.TextField`. It auto-heights when you type into it

…but on reloading the page, the auto-heighting appears not to occur. This is four rows of text after reloading the page:

Three rows, with the exact same widget height:

But two rows with a reduced widget height:

@davecranwell suggested an off-by-one error, which seems reasonable, given the slightly different heights above.
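One other plausible cause (an assumption on my part, not confirmed in this report) is a mismatch between the call form used in `render_js_init` and the API exposed by the bundled autosize build: newer autosize releases expose a standalone `autosize(element)` entry point that measures existing content at initialisation, rather than the jQuery-plugin form. A hypothetical sketch of the widget using that call form, for illustration only:

```python
from django.forms import widgets

from wagtail.utils.widgets import WidgetWithScript


class AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):
    def __init__(self, attrs=None):
        # Keep rows=1; autosize recalculates the height from the content.
        default_attrs = {'rows': '1'}
        if attrs:
            default_attrs.update(attrs)
        super(AdminAutoHeightTextInput, self).__init__(default_attrs)

    def render_js_init(self, id_, name, value):
        # Hypothetical: standalone autosize() call instead of the
        # jQuery-plugin form, so pre-filled values are sized on page load.
        return 'autosize($("#{0}"));'.format(id_)
```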
</issue>
<code>
[start of wagtail/wagtailadmin/widgets.py]
1 from __future__ import absolute_import, unicode_literals
2
3 import json
4
5 from django.core.urlresolvers import reverse
6 from django.forms import widgets
7 from django.contrib.contenttypes.models import ContentType
8 from django.utils.translation import ugettext_lazy as _
9 from django.template.loader import render_to_string
10
11 from wagtail.utils.widgets import WidgetWithScript
12 from wagtail.wagtailcore.models import Page
13
14 from taggit.forms import TagWidget
15
16
17 class AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):
18 def __init__(self, attrs=None):
19 # Use more appropriate rows default, given autoheight will alter this anyway
20 default_attrs = {'rows': '1'}
21 if attrs:
22 default_attrs.update(attrs)
23
24 super(AdminAutoHeightTextInput, self).__init__(default_attrs)
25
26 def render_js_init(self, id_, name, value):
27 return '$("#{0}").autosize();'.format(id_)
28
29 class AdminDateInput(WidgetWithScript, widgets.DateInput):
30 # Set a default date format to match the one that our JS date picker expects -
31 # it can still be overridden explicitly, but this way it won't be affected by
32 # the DATE_INPUT_FORMATS setting
33 def __init__(self, attrs=None, format='%Y-%m-%d'):
34 super(AdminDateInput, self).__init__(attrs=attrs, format=format)
35
36 def render_js_init(self, id_, name, value):
37 return 'initDateChooser({0});'.format(json.dumps(id_))
38
39
40 class AdminTimeInput(WidgetWithScript, widgets.TimeInput):
41 def __init__(self, attrs=None, format='%H:%M'):
42 super(AdminTimeInput, self).__init__(attrs=attrs, format=format)
43
44 def render_js_init(self, id_, name, value):
45 return 'initTimeChooser({0});'.format(json.dumps(id_))
46
47
48 class AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):
49 def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):
50 super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)
51
52 def render_js_init(self, id_, name, value):
53 return 'initDateTimeChooser({0});'.format(json.dumps(id_))
54
55
56 class AdminTagWidget(WidgetWithScript, TagWidget):
57 def render_js_init(self, id_, name, value):
58 return "initTagField({0}, {1});".format(
59 json.dumps(id_),
60 json.dumps(reverse('wagtailadmin_tag_autocomplete')))
61
62
63 class AdminChooser(WidgetWithScript, widgets.Input):
64 input_type = 'hidden'
65 choose_one_text = _("Choose an item")
66 choose_another_text = _("Choose another item")
67 clear_choice_text = _("Clear choice")
68 link_to_chosen_text = _("Edit this item")
69
70 def get_instance(self, model_class, value):
71 # helper method for cleanly turning 'value' into an instance object
72 if value is None:
73 return None
74
75 try:
76 return model_class.objects.get(pk=value)
77 except model_class.DoesNotExist:
78 return None
79
80 def get_instance_and_id(self, model_class, value):
81 if value is None:
82 return (None, None)
83 elif isinstance(value, model_class):
84 return (value, value.pk)
85 else:
86 try:
87 return (model_class.objects.get(pk=value), value)
88 except model_class.DoesNotExist:
89 return (None, None)
90
91 def value_from_datadict(self, data, files, name):
92 # treat the empty string as None
93 result = super(AdminChooser, self).value_from_datadict(data, files, name)
94 if result == '':
95 return None
96 else:
97 return result
98
99 def __init__(self, **kwargs):
100 # allow choose_one_text / choose_another_text to be overridden per-instance
101 if 'choose_one_text' in kwargs:
102 self.choose_one_text = kwargs.pop('choose_one_text')
103 if 'choose_another_text' in kwargs:
104 self.choose_another_text = kwargs.pop('choose_another_text')
105 if 'clear_choice_text' in kwargs:
106 self.clear_choice_text = kwargs.pop('clear_choice_text')
107 if 'link_to_chosen_text' in kwargs:
108 self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')
109 super(AdminChooser, self).__init__(**kwargs)
110
111
112 class AdminPageChooser(AdminChooser):
113 target_content_type = None
114 choose_one_text = _('Choose a page')
115 choose_another_text = _('Choose another page')
116 link_to_chosen_text = _('Edit this page')
117
118 def __init__(self, content_type=None, **kwargs):
119 super(AdminPageChooser, self).__init__(**kwargs)
120 self.target_content_type = content_type or ContentType.objects.get_for_model(Page)
121
122 def render_html(self, name, value, attrs):
123 model_class = self.target_content_type.model_class()
124 instance, value = self.get_instance_and_id(model_class, value)
125
126 original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)
127
128 return render_to_string("wagtailadmin/widgets/page_chooser.html", {
129 'widget': self,
130 'original_field_html': original_field_html,
131 'attrs': attrs,
132 'value': value,
133 'page': instance,
134 })
135
136 def render_js_init(self, id_, name, value):
137 model_class = self.target_content_type.model_class()
138 if isinstance(value, model_class):
139 page = value
140 else:
141 page = self.get_instance(model_class, value)
142 parent = page.get_parent() if page else None
143 content_type = self.target_content_type
144
145 return "createPageChooser({id}, {content_type}, {parent});".format(
146 id=json.dumps(id_),
147 content_type=json.dumps('{app}.{model}'.format(
148 app=content_type.app_label,
149 model=content_type.model)),
150 parent=json.dumps(parent.id if parent else None))
151
[end of wagtail/wagtailadmin/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailadmin/widgets.py b/wagtail/wagtailadmin/widgets.py
--- a/wagtail/wagtailadmin/widgets.py
+++ b/wagtail/wagtailadmin/widgets.py
@@ -24,7 +24,7 @@
super(AdminAutoHeightTextInput, self).__init__(default_attrs)
def render_js_init(self, id_, name, value):
- return '$("#{0}").autosize();'.format(id_)
+ return 'autosize($("#{0}"));'.format(id_)
class AdminDateInput(WidgetWithScript, widgets.DateInput):
# Set a default date format to match the one that our JS date picker expects -
| {"golden_diff": "diff --git a/wagtail/wagtailadmin/widgets.py b/wagtail/wagtailadmin/widgets.py\n--- a/wagtail/wagtailadmin/widgets.py\n+++ b/wagtail/wagtailadmin/widgets.py\n@@ -24,7 +24,7 @@\n super(AdminAutoHeightTextInput, self).__init__(default_attrs)\n \n def render_js_init(self, id_, name, value):\n- return '$(\"#{0}\").autosize();'.format(id_)\n+ return 'autosize($(\"#{0}\"));'.format(id_)\n \n class AdminDateInput(WidgetWithScript, widgets.DateInput):\n # Set a default date format to match the one that our JS date picker expects -\n", "issue": "AdminAutoHeightTextInput doesn't auto-height on page load\n`wagtailadmin.widgets.AdminAutoHeightTextInput` appears now to be the new default widget for `django.db.models.TextField`. It auto-heights when you type into it\n\n\n\n\u2026but on reloading the page, the auto-heighting appears not to occur. This is four rows of text after reloading the page:\n\n \n\nThree rows, with the exact same widget height:\n\n\n\nBut two rows with a reduced widget height:\n\n\n\n@davecranwell suggested an off-by-one error, which seems reasonable, given the slightly different heights above. \n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django.core.urlresolvers import reverse\nfrom django.forms import widgets\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.loader import render_to_string\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.models import Page\n\nfrom taggit.forms import TagWidget\n\n\nclass AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):\n def __init__(self, attrs=None):\n # Use more appropriate rows default, given autoheight will alter this anyway\n default_attrs = {'rows': '1'}\n if attrs:\n default_attrs.update(attrs)\n\n super(AdminAutoHeightTextInput, self).__init__(default_attrs)\n\n def render_js_init(self, id_, name, value):\n return '$(\"#{0}\").autosize();'.format(id_)\n\nclass AdminDateInput(WidgetWithScript, widgets.DateInput):\n # Set a default date format to match the one that our JS date picker expects -\n # it can still be overridden explicitly, but this way it won't be affected by\n # the DATE_INPUT_FORMATS setting\n def __init__(self, attrs=None, format='%Y-%m-%d'):\n super(AdminDateInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTimeInput(WidgetWithScript, widgets.TimeInput):\n def __init__(self, attrs=None, format='%H:%M'):\n super(AdminTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):\n def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):\n super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTagWidget(WidgetWithScript, TagWidget):\n def render_js_init(self, id_, name, value):\n return \"initTagField({0}, {1});\".format(\n json.dumps(id_),\n json.dumps(reverse('wagtailadmin_tag_autocomplete')))\n\n\nclass AdminChooser(WidgetWithScript, widgets.Input):\n input_type = 'hidden'\n choose_one_text = _(\"Choose an item\")\n choose_another_text = _(\"Choose another item\")\n 
clear_choice_text = _(\"Clear choice\")\n link_to_chosen_text = _(\"Edit this item\")\n\n def get_instance(self, model_class, value):\n # helper method for cleanly turning 'value' into an instance object\n if value is None:\n return None\n\n try:\n return model_class.objects.get(pk=value)\n except model_class.DoesNotExist:\n return None\n\n def get_instance_and_id(self, model_class, value):\n if value is None:\n return (None, None)\n elif isinstance(value, model_class):\n return (value, value.pk)\n else:\n try:\n return (model_class.objects.get(pk=value), value)\n except model_class.DoesNotExist:\n return (None, None)\n\n def value_from_datadict(self, data, files, name):\n # treat the empty string as None\n result = super(AdminChooser, self).value_from_datadict(data, files, name)\n if result == '':\n return None\n else:\n return result\n\n def __init__(self, **kwargs):\n # allow choose_one_text / choose_another_text to be overridden per-instance\n if 'choose_one_text' in kwargs:\n self.choose_one_text = kwargs.pop('choose_one_text')\n if 'choose_another_text' in kwargs:\n self.choose_another_text = kwargs.pop('choose_another_text')\n if 'clear_choice_text' in kwargs:\n self.clear_choice_text = kwargs.pop('clear_choice_text')\n if 'link_to_chosen_text' in kwargs:\n self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')\n super(AdminChooser, self).__init__(**kwargs)\n\n\nclass AdminPageChooser(AdminChooser):\n target_content_type = None\n choose_one_text = _('Choose a page')\n choose_another_text = _('Choose another page')\n link_to_chosen_text = _('Edit this page')\n\n def __init__(self, content_type=None, **kwargs):\n super(AdminPageChooser, self).__init__(**kwargs)\n self.target_content_type = content_type or ContentType.objects.get_for_model(Page)\n\n def render_html(self, name, value, attrs):\n model_class = self.target_content_type.model_class()\n instance, value = self.get_instance_and_id(model_class, value)\n\n original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)\n\n return render_to_string(\"wagtailadmin/widgets/page_chooser.html\", {\n 'widget': self,\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n 'page': instance,\n })\n\n def render_js_init(self, id_, name, value):\n model_class = self.target_content_type.model_class()\n if isinstance(value, model_class):\n page = value\n else:\n page = self.get_instance(model_class, value)\n parent = page.get_parent() if page else None\n content_type = self.target_content_type\n\n return \"createPageChooser({id}, {content_type}, {parent});\".format(\n id=json.dumps(id_),\n content_type=json.dumps('{app}.{model}'.format(\n app=content_type.app_label,\n model=content_type.model)),\n parent=json.dumps(parent.id if parent else None))\n", "path": "wagtail/wagtailadmin/widgets.py"}]} | 2,621 | 151 |
gh_patches_debug_31012 | rasdani/github-patches | git_diff | akvo__akvo-rsr-4566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
enumerators should be able to see "pending/unapproved" indicator updates from other enumerators.
When multiple enumerators are collaborating on a single indicator, they need to be able to see the updates from other enumerators.
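For context, the visibility rules live in `IndicatorPeriodData.get_user_viewable_updates` in the file below. A rough sketch of one way to widen visibility — assuming, and this is an assumption rather than anything stated above, that "collaborating enumerator" can be approximated by "authenticated user with view permission on the project" — might look like this:

```python
from akvo.rsr.models.result.indicator_period_data import IndicatorPeriodData


def viewable_updates(queryset, user, project=None):
    """Sketch: let collaborators see each other's unapproved updates."""
    approved = queryset.filter(status=IndicatorPeriodData.STATUS_APPROVED_CODE)
    if user.is_anonymous():
        return approved
    if user.is_admin or user.is_superuser:
        return queryset
    if project is not None and user.has_perm('rsr.view_project', project):
        # Assumption: project-level view permission is enough to expose
        # pending / returned-for-revision updates from other enumerators.
        return queryset
    return (approved | queryset.filter(user=user)).distinct()
```

Whether drafts from other enumerators should also become visible is a product decision this sketch leaves open.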
</issue>
<code>
[start of akvo/rsr/models/result/indicator_period_data.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from decimal import Decimal, InvalidOperation
8
9 from django.conf import settings
10 from django.contrib.postgres.fields import ArrayField
11 from django.core.exceptions import ValidationError
12 from django.db import models
13 from django.db.models.signals import post_save
14 from django.dispatch import receiver
15 from django.utils.translation import ugettext_lazy as _
16 from sorl.thumbnail.fields import ImageField
17
18 from .utils import (calculate_percentage, file_path, image_path,
19 MultipleUpdateError, PERCENTAGE_MEASURE, QUALITATIVE,
20 QUANTITATIVE)
21 from akvo.rsr.fields import ValidXMLCharField, ValidXMLTextField
22 from akvo.rsr.mixins import TimestampsMixin, IndicatorUpdateMixin
23 from akvo.utils import rsr_image_path
24
25
26 class IndicatorPeriodData(TimestampsMixin, IndicatorUpdateMixin, models.Model):
27 """
28 Model for adding data to an indicator period.
29 """
30
31 project_relation = 'results__indicators__periods__data__in'
32
33 STATUS_DRAFT = str(_('draft'))
34 STATUS_PENDING = str(_('pending approval'))
35 STATUS_REVISION = str(_('return for revision'))
36 STATUS_APPROVED = str(_('approved'))
37
38 STATUS_DRAFT_CODE = 'D'
39 STATUS_PENDING_CODE = 'P'
40 STATUS_REVISION_CODE = 'R'
41 STATUS_APPROVED_CODE = 'A'
42
43 STATUS_CODES_LIST = [STATUS_DRAFT_CODE, STATUS_PENDING_CODE,
44 STATUS_REVISION_CODE, STATUS_APPROVED_CODE]
45 STATUSES_LABELS_LIST = [STATUS_DRAFT, STATUS_PENDING, STATUS_REVISION,
46 STATUS_APPROVED]
47 STATUSES = list(zip(STATUS_CODES_LIST, STATUSES_LABELS_LIST))
48
49 UPDATE_METHODS = (
50 ('W', _('web')),
51 ('M', _('mobile')),
52 )
53
54 period = models.ForeignKey('IndicatorPeriod', verbose_name=_('indicator period'),
55 related_name='data', on_delete=models.PROTECT)
56 # TODO: rename to created_by when old results framework page is no longer in use
57 user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), db_index=True,
58 related_name='created_period_updates')
59 approved_by = models.ForeignKey(
60 settings.AUTH_USER_MODEL, verbose_name=_('approved by'), db_index=True,
61 related_name='approved_period_updates', blank=True, null=True,
62 )
63 narrative = ValidXMLTextField(_('qualitative indicator narrative'), blank=True)
64 score_index = models.SmallIntegerField(_('score index'), null=True, blank=True)
65 score_indices = ArrayField(models.SmallIntegerField(), default=[])
66 period_actual_value = ValidXMLCharField(_('period actual value'), max_length=50, default='')
67 status = ValidXMLCharField(_('status'), max_length=1, choices=STATUSES, db_index=True,
68 default=STATUS_DRAFT_CODE)
69 text = ValidXMLTextField(_('text'), blank=True)
70 review_note = ValidXMLTextField(_('text'), blank=True)
71 photo = ImageField(_('photo'), blank=True, upload_to=image_path, max_length=255)
72 file = models.FileField(_('file'), blank=True, upload_to=file_path, max_length=255)
73 update_method = ValidXMLCharField(_('update method'), blank=True, max_length=1,
74 choices=UPDATE_METHODS, db_index=True, default='W')
75
76 class Meta:
77 app_label = 'rsr'
78 verbose_name = _('indicator period data')
79 verbose_name_plural = _('indicator period data')
80 ordering = ('-id', )
81
82 def save(self, recalculate=True, *args, **kwargs):
83 # Allow only a single update for percentage measure indicators
84 if not self.period.can_save_update(self.id):
85 raise MultipleUpdateError('Cannot create multiple updates with percentages')
86
87 if (
88 self.period.indicator.measure == PERCENTAGE_MEASURE
89 and self.numerator is not None
90 and self.denominator not in {0, '0', None}
91 ):
92 self.value = calculate_percentage(self.numerator, self.denominator)
93
94 super(IndicatorPeriodData, self).save(*args, **kwargs)
95
96 # In case the status is approved, recalculate the period
97 if recalculate and self.status == self.STATUS_APPROVED_CODE:
98 # FIXME: Should we call this even when status is not approved?
99 self.period.recalculate_period()
100 self.period.update_actual_comment()
101 # Update score even when the update is not approved, yet. It handles the
102 # case where an approved update is returned for revision, etc.
103 self.period.update_score()
104
105 def delete(self, *args, **kwargs):
106 old_status = self.status
107
108 super(IndicatorPeriodData, self).delete(*args, **kwargs)
109
110 # In case the status was approved, recalculate the period
111 if old_status == self.STATUS_APPROVED_CODE:
112 self.period.recalculate_period()
113 self.period.update_actual_comment()
114 self.period.update_score()
115
116 def clean(self):
117 """
118 Perform several checks before we can actually save the update data.
119 """
120 validation_errors = {}
121
122 project = self.period.indicator.result.project
123
124 # Don't allow a data update to an unpublished project
125 if not project.is_published():
126 validation_errors['period'] = str(_('Indicator period must be part of a published '
127 'project to add data to it'))
128 raise ValidationError(validation_errors)
129
130 # Don't allow a data update to a non-Impact project
131 if not project.is_impact_project:
132 validation_errors['period'] = str(_('Indicator period must be part of an RSR '
133 'Impact project to add data to it'))
134 raise ValidationError(validation_errors)
135
136 # Don't allow a data update to a locked period
137 if self.period.locked:
138 validation_errors['period'] = str(_('Indicator period must be unlocked to add '
139 'data to it'))
140 raise ValidationError(validation_errors)
141
142 # Don't allow a data update to an aggregated parent period with 'percentage' as measurement
143 if self.period.indicator.children_aggregate_percentage:
144 validation_errors['period'] = str(
145 _('Indicator period has an average aggregate of the child projects. Disable '
146 'aggregations to add data to it'))
147 raise ValidationError(validation_errors)
148
149 if self.pk:
150 orig = IndicatorPeriodData.objects.get(pk=self.pk)
151
152 # Don't allow for the indicator period to change
153 if orig.period != self.period:
154 validation_errors['period'] = str(_('Not allowed to change indicator period '
155 'in a data update'))
156
157 if self.period.indicator.type == QUANTITATIVE:
158 if self.narrative is not None:
159 validation_errors['period'] = str(
160 _('Narrative field should be empty in quantitative indicators'))
161 if self.value is not None:
162 try:
163 self.value = Decimal(self.value)
164 except Exception:
165 validation_errors['period'] = str(
166 _('Only numeric values are allowed in quantitative indicators'))
167
168 if self.period.indicator.type == QUALITATIVE:
169 if self.value is not None:
170 validation_errors['period'] = str(
171 _('Value field should be empty in qualitative indicators'))
172
173 if validation_errors:
174 raise ValidationError(validation_errors)
175
176 @property
177 def status_display(self):
178 """
179 Returns the display of the status.
180 """
181 try:
182 return dict(self.STATUSES)[self.status].capitalize()
183 except KeyError:
184 return ''
185
186 @property
187 def photo_url(self):
188 """
189 Returns the full URL of the photo.
190 """
191 return self.photo.url if self.photo else ''
192
193 @property
194 def file_url(self):
195 """
196 Returns the full URL of the file.
197 """
198 return self.file.url if self.file else ''
199
200 def update_new_value(self):
201 """Returns a string with the new value."""
202 try:
203 add_up = Decimal(self.value) + Decimal(self.period_actual_value)
204 relative = '+' + str(self.value) if self.value >= 0 else str(self.value)
205 return "{} ({})".format(str(add_up), relative)
206 except (InvalidOperation, TypeError):
207 return self.value
208
209 @classmethod
210 def get_user_viewable_updates(cls, queryset, user):
211 approved_updates = queryset.filter(status=cls.STATUS_APPROVED_CODE)
212
213 if user.is_anonymous():
214 f_queryset = approved_updates
215
216 elif user.is_admin or user.is_superuser:
217 f_queryset = queryset
218
219 else:
220 own_updates = queryset.filter(user=user)
221 non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)
222 filter_ = user.get_permission_filter(
223 'rsr.view_indicatorperioddata',
224 'period__indicator__result__project__'
225 )
226
227 from akvo.rsr.models import Project
228 projects = Project.objects\
229 .filter(results__indicators__periods__data__in=queryset)\
230 .distinct()
231
232 project = projects.first()
233 if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():
234 others_updates = non_draft_updates
235 else:
236 others_updates = non_draft_updates.filter(filter_)
237
238 f_queryset = (
239 approved_updates
240 | own_updates
241 | others_updates
242 )
243
244 return f_queryset.distinct()
245
246
247 def update_image_path(instance, file_name):
248 path = 'db/indicator_period_data/%d/data_photo/%%(instance_pk)s/%%(file_name)s' % instance.update.pk
249 return rsr_image_path(instance, file_name, path)
250
251
252 class IndicatorPeriodDataPhoto(models.Model):
253 update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)
254 photo = ImageField(_('photo'), upload_to=update_image_path, max_length=255)
255
256 class Meta:
257 app_label = 'rsr'
258
259
260 def update_file_path(instance, file_name):
261 path = 'db/indicator_period_data/%d/data_file/%%(instance_pk)s/%%(file_name)s' % instance.update.pk
262 return rsr_image_path(instance, file_name, path)
263
264
265 class IndicatorPeriodDataFile(models.Model):
266 update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)
267 file = models.FileField(_('file'), upload_to=update_file_path, max_length=255)
268
269 class Meta:
270 app_label = 'rsr'
271
272
273 @receiver(post_save, sender=IndicatorPeriodData)
274 def set_qualitative_narrative(sender, **kwargs):
275 """Update the narrative field of a qualitative indicator on updates."""
276
277 # Disable signal handler when loading fixtures
278 if kwargs.get('raw', False):
279 return
280
281 update = kwargs['instance']
282 if update.status != IndicatorPeriodData.STATUS_APPROVED_CODE:
283 return
284
285 if update.period.indicator.type != QUALITATIVE:
286 return
287
288 # Current update is the latest update?
289 if update.period.approved_updates.last().id != update.id:
290 return
291
292 update.period.narrative = update.narrative
293 update.period.save()
294
295
296 @receiver(post_save, sender=IndicatorPeriodData)
297 def _send_return_for_revision_email(sender, **kwargs):
298 """Send email to assigned enumerator when indicator is returned for revision."""
299
300 # Disable signal handler when loading fixtures
301 if kwargs.get('raw', False):
302 return
303
304 update = kwargs['instance']
305 if update.status != IndicatorPeriodData.STATUS_REVISION_CODE:
306 return
307
308 from akvo.rest.views.project_enumerators import send_return_for_revision_email
309 send_return_for_revision_email(update)
310
[end of akvo/rsr/models/result/indicator_period_data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/models/result/indicator_period_data.py b/akvo/rsr/models/result/indicator_period_data.py
--- a/akvo/rsr/models/result/indicator_period_data.py
+++ b/akvo/rsr/models/result/indicator_period_data.py
@@ -217,29 +217,29 @@
f_queryset = queryset
else:
- own_updates = queryset.filter(user=user)
- non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)
- filter_ = user.get_permission_filter(
- 'rsr.view_indicatorperioddata',
- 'period__indicator__result__project__'
- )
-
from akvo.rsr.models import Project
projects = Project.objects\
.filter(results__indicators__periods__data__in=queryset)\
.distinct()
+ project = projects.first() if projects.count() == 1 else None
+
+ # Allow Nuffic users to see all updates, irrespective of what state they are in
+ if project is not None and project.in_nuffic_hierarchy() and user.has_perm('rsr.view_project', project):
+ f_queryset = queryset
- project = projects.first()
- if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():
- others_updates = non_draft_updates
else:
+ own_updates = queryset.filter(user=user)
+ non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)
+ filter_ = user.get_permission_filter(
+ 'rsr.view_indicatorperioddata',
+ 'period__indicator__result__project__'
+ )
others_updates = non_draft_updates.filter(filter_)
-
- f_queryset = (
- approved_updates
- | own_updates
- | others_updates
- )
+ f_queryset = (
+ approved_updates
+ | own_updates
+ | others_updates
+ )
return f_queryset.distinct()
| {"golden_diff": "diff --git a/akvo/rsr/models/result/indicator_period_data.py b/akvo/rsr/models/result/indicator_period_data.py\n--- a/akvo/rsr/models/result/indicator_period_data.py\n+++ b/akvo/rsr/models/result/indicator_period_data.py\n@@ -217,29 +217,29 @@\n f_queryset = queryset\n \n else:\n- own_updates = queryset.filter(user=user)\n- non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)\n- filter_ = user.get_permission_filter(\n- 'rsr.view_indicatorperioddata',\n- 'period__indicator__result__project__'\n- )\n-\n from akvo.rsr.models import Project\n projects = Project.objects\\\n .filter(results__indicators__periods__data__in=queryset)\\\n .distinct()\n+ project = projects.first() if projects.count() == 1 else None\n+\n+ # Allow Nuffic users to see all updates, irrespective of what state they are in\n+ if project is not None and project.in_nuffic_hierarchy() and user.has_perm('rsr.view_project', project):\n+ f_queryset = queryset\n \n- project = projects.first()\n- if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():\n- others_updates = non_draft_updates\n else:\n+ own_updates = queryset.filter(user=user)\n+ non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)\n+ filter_ = user.get_permission_filter(\n+ 'rsr.view_indicatorperioddata',\n+ 'period__indicator__result__project__'\n+ )\n others_updates = non_draft_updates.filter(filter_)\n-\n- f_queryset = (\n- approved_updates\n- | own_updates\n- | others_updates\n- )\n+ f_queryset = (\n+ approved_updates\n+ | own_updates\n+ | others_updates\n+ )\n \n return f_queryset.distinct()\n", "issue": "enumerators should be able to see \"pending/unapproved\" indicator updates from other enumerators.\nWhen multiple enumerators are collaborating on a single indicator, they need to be able to see the updates from other enumerators. 
\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom decimal import Decimal, InvalidOperation\n\nfrom django.conf import settings\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.translation import ugettext_lazy as _\nfrom sorl.thumbnail.fields import ImageField\n\nfrom .utils import (calculate_percentage, file_path, image_path,\n MultipleUpdateError, PERCENTAGE_MEASURE, QUALITATIVE,\n QUANTITATIVE)\nfrom akvo.rsr.fields import ValidXMLCharField, ValidXMLTextField\nfrom akvo.rsr.mixins import TimestampsMixin, IndicatorUpdateMixin\nfrom akvo.utils import rsr_image_path\n\n\nclass IndicatorPeriodData(TimestampsMixin, IndicatorUpdateMixin, models.Model):\n \"\"\"\n Model for adding data to an indicator period.\n \"\"\"\n\n project_relation = 'results__indicators__periods__data__in'\n\n STATUS_DRAFT = str(_('draft'))\n STATUS_PENDING = str(_('pending approval'))\n STATUS_REVISION = str(_('return for revision'))\n STATUS_APPROVED = str(_('approved'))\n\n STATUS_DRAFT_CODE = 'D'\n STATUS_PENDING_CODE = 'P'\n STATUS_REVISION_CODE = 'R'\n STATUS_APPROVED_CODE = 'A'\n\n STATUS_CODES_LIST = [STATUS_DRAFT_CODE, STATUS_PENDING_CODE,\n STATUS_REVISION_CODE, STATUS_APPROVED_CODE]\n STATUSES_LABELS_LIST = [STATUS_DRAFT, STATUS_PENDING, STATUS_REVISION,\n STATUS_APPROVED]\n STATUSES = list(zip(STATUS_CODES_LIST, STATUSES_LABELS_LIST))\n\n UPDATE_METHODS = (\n ('W', _('web')),\n ('M', _('mobile')),\n )\n\n period = models.ForeignKey('IndicatorPeriod', verbose_name=_('indicator period'),\n related_name='data', on_delete=models.PROTECT)\n # TODO: rename to created_by when old results framework page is no longer in use\n user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('user'), db_index=True,\n related_name='created_period_updates')\n approved_by = models.ForeignKey(\n settings.AUTH_USER_MODEL, verbose_name=_('approved by'), db_index=True,\n related_name='approved_period_updates', blank=True, null=True,\n )\n narrative = ValidXMLTextField(_('qualitative indicator narrative'), blank=True)\n score_index = models.SmallIntegerField(_('score index'), null=True, blank=True)\n score_indices = ArrayField(models.SmallIntegerField(), default=[])\n period_actual_value = ValidXMLCharField(_('period actual value'), max_length=50, default='')\n status = ValidXMLCharField(_('status'), max_length=1, choices=STATUSES, db_index=True,\n default=STATUS_DRAFT_CODE)\n text = ValidXMLTextField(_('text'), blank=True)\n review_note = ValidXMLTextField(_('text'), blank=True)\n photo = ImageField(_('photo'), blank=True, upload_to=image_path, max_length=255)\n file = models.FileField(_('file'), blank=True, upload_to=file_path, max_length=255)\n update_method = ValidXMLCharField(_('update method'), blank=True, max_length=1,\n choices=UPDATE_METHODS, db_index=True, default='W')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('indicator period data')\n verbose_name_plural = _('indicator period data')\n ordering = ('-id', )\n\n def save(self, recalculate=True, *args, **kwargs):\n # Allow only a single update for percentage measure indicators\n if not 
self.period.can_save_update(self.id):\n raise MultipleUpdateError('Cannot create multiple updates with percentages')\n\n if (\n self.period.indicator.measure == PERCENTAGE_MEASURE\n and self.numerator is not None\n and self.denominator not in {0, '0', None}\n ):\n self.value = calculate_percentage(self.numerator, self.denominator)\n\n super(IndicatorPeriodData, self).save(*args, **kwargs)\n\n # In case the status is approved, recalculate the period\n if recalculate and self.status == self.STATUS_APPROVED_CODE:\n # FIXME: Should we call this even when status is not approved?\n self.period.recalculate_period()\n self.period.update_actual_comment()\n # Update score even when the update is not approved, yet. It handles the\n # case where an approved update is returned for revision, etc.\n self.period.update_score()\n\n def delete(self, *args, **kwargs):\n old_status = self.status\n\n super(IndicatorPeriodData, self).delete(*args, **kwargs)\n\n # In case the status was approved, recalculate the period\n if old_status == self.STATUS_APPROVED_CODE:\n self.period.recalculate_period()\n self.period.update_actual_comment()\n self.period.update_score()\n\n def clean(self):\n \"\"\"\n Perform several checks before we can actually save the update data.\n \"\"\"\n validation_errors = {}\n\n project = self.period.indicator.result.project\n\n # Don't allow a data update to an unpublished project\n if not project.is_published():\n validation_errors['period'] = str(_('Indicator period must be part of a published '\n 'project to add data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to a non-Impact project\n if not project.is_impact_project:\n validation_errors['period'] = str(_('Indicator period must be part of an RSR '\n 'Impact project to add data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to a locked period\n if self.period.locked:\n validation_errors['period'] = str(_('Indicator period must be unlocked to add '\n 'data to it'))\n raise ValidationError(validation_errors)\n\n # Don't allow a data update to an aggregated parent period with 'percentage' as measurement\n if self.period.indicator.children_aggregate_percentage:\n validation_errors['period'] = str(\n _('Indicator period has an average aggregate of the child projects. 
Disable '\n 'aggregations to add data to it'))\n raise ValidationError(validation_errors)\n\n if self.pk:\n orig = IndicatorPeriodData.objects.get(pk=self.pk)\n\n # Don't allow for the indicator period to change\n if orig.period != self.period:\n validation_errors['period'] = str(_('Not allowed to change indicator period '\n 'in a data update'))\n\n if self.period.indicator.type == QUANTITATIVE:\n if self.narrative is not None:\n validation_errors['period'] = str(\n _('Narrative field should be empty in quantitative indicators'))\n if self.value is not None:\n try:\n self.value = Decimal(self.value)\n except Exception:\n validation_errors['period'] = str(\n _('Only numeric values are allowed in quantitative indicators'))\n\n if self.period.indicator.type == QUALITATIVE:\n if self.value is not None:\n validation_errors['period'] = str(\n _('Value field should be empty in qualitative indicators'))\n\n if validation_errors:\n raise ValidationError(validation_errors)\n\n @property\n def status_display(self):\n \"\"\"\n Returns the display of the status.\n \"\"\"\n try:\n return dict(self.STATUSES)[self.status].capitalize()\n except KeyError:\n return ''\n\n @property\n def photo_url(self):\n \"\"\"\n Returns the full URL of the photo.\n \"\"\"\n return self.photo.url if self.photo else ''\n\n @property\n def file_url(self):\n \"\"\"\n Returns the full URL of the file.\n \"\"\"\n return self.file.url if self.file else ''\n\n def update_new_value(self):\n \"\"\"Returns a string with the new value.\"\"\"\n try:\n add_up = Decimal(self.value) + Decimal(self.period_actual_value)\n relative = '+' + str(self.value) if self.value >= 0 else str(self.value)\n return \"{} ({})\".format(str(add_up), relative)\n except (InvalidOperation, TypeError):\n return self.value\n\n @classmethod\n def get_user_viewable_updates(cls, queryset, user):\n approved_updates = queryset.filter(status=cls.STATUS_APPROVED_CODE)\n\n if user.is_anonymous():\n f_queryset = approved_updates\n\n elif user.is_admin or user.is_superuser:\n f_queryset = queryset\n\n else:\n own_updates = queryset.filter(user=user)\n non_draft_updates = queryset.exclude(status=cls.STATUS_DRAFT_CODE)\n filter_ = user.get_permission_filter(\n 'rsr.view_indicatorperioddata',\n 'period__indicator__result__project__'\n )\n\n from akvo.rsr.models import Project\n projects = Project.objects\\\n .filter(results__indicators__periods__data__in=queryset)\\\n .distinct()\n\n project = projects.first()\n if projects.count() == 1 and user.has_perm('rsr.view_project', project) and project.in_nuffic_hierarchy():\n others_updates = non_draft_updates\n else:\n others_updates = non_draft_updates.filter(filter_)\n\n f_queryset = (\n approved_updates\n | own_updates\n | others_updates\n )\n\n return f_queryset.distinct()\n\n\ndef update_image_path(instance, file_name):\n path = 'db/indicator_period_data/%d/data_photo/%%(instance_pk)s/%%(file_name)s' % instance.update.pk\n return rsr_image_path(instance, file_name, path)\n\n\nclass IndicatorPeriodDataPhoto(models.Model):\n update = models.ForeignKey('IndicatorPeriodData', on_delete=models.CASCADE)\n photo = ImageField(_('photo'), upload_to=update_image_path, max_length=255)\n\n class Meta:\n app_label = 'rsr'\n\n\ndef update_file_path(instance, file_name):\n path = 'db/indicator_period_data/%d/data_file/%%(instance_pk)s/%%(file_name)s' % instance.update.pk\n return rsr_image_path(instance, file_name, path)\n\n\nclass IndicatorPeriodDataFile(models.Model):\n update = models.ForeignKey('IndicatorPeriodData', 
on_delete=models.CASCADE)\n file = models.FileField(_('file'), upload_to=update_file_path, max_length=255)\n\n class Meta:\n app_label = 'rsr'\n\n\n@receiver(post_save, sender=IndicatorPeriodData)\ndef set_qualitative_narrative(sender, **kwargs):\n \"\"\"Update the narrative field of a qualitative indicator on updates.\"\"\"\n\n # Disable signal handler when loading fixtures\n if kwargs.get('raw', False):\n return\n\n update = kwargs['instance']\n if update.status != IndicatorPeriodData.STATUS_APPROVED_CODE:\n return\n\n if update.period.indicator.type != QUALITATIVE:\n return\n\n # Current update is the latest update?\n if update.period.approved_updates.last().id != update.id:\n return\n\n update.period.narrative = update.narrative\n update.period.save()\n\n\n@receiver(post_save, sender=IndicatorPeriodData)\ndef _send_return_for_revision_email(sender, **kwargs):\n \"\"\"Send email to assigned enumerator when indicator is returned for revision.\"\"\"\n\n # Disable signal handler when loading fixtures\n if kwargs.get('raw', False):\n return\n\n update = kwargs['instance']\n if update.status != IndicatorPeriodData.STATUS_REVISION_CODE:\n return\n\n from akvo.rest.views.project_enumerators import send_return_for_revision_email\n send_return_for_revision_email(update)\n", "path": "akvo/rsr/models/result/indicator_period_data.py"}]} | 3,923 | 437 |
gh_patches_debug_27372 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-4250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Different mean values of image normalization for HRNet
I find that the different configs of the HRNet experiments use different mean values for image normalization.
For example,
[fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) uses the `[102.9801, 115.9465, 122.7717]` as the mean value,
while [fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) uses the `[103.530, 116.280, 123.675]` as the mean value.
Which one is correct?
</issue>
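Editorial note, not part of the original issue: the two sets of numbers come from different conventions. `[102.9801, 115.9465, 122.7717]` is the Detectron/Caffe2 BGR pixel mean, while `[103.530, 116.280, 123.675]` is the ImageNet BGR mean (0.406/0.456/0.485 scaled by 255). Assuming the goal is simply a self-consistent ImageNet-style config, a sketch of the normalization block might look as follows; the exact statistics that the pretrained `hrnetv2_w32` weights expect are an assumption here, not something the issue states.
```python
# Hypothetical, self-consistent normalization config (assumption: ImageNet
# statistics in BGR order with to_rgb=False, matching the caffe-style pipeline).
img_norm_cfg = dict(
    mean=[103.53, 116.28, 123.675],  # ImageNet channel means, BGR order
    std=[57.375, 57.12, 58.395],     # ImageNet channel stds, BGR order
    to_rgb=False,
)
```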
<code>
[start of configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py]
1 _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
2 img_norm_cfg = dict(
3 mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
4 train_pipeline = [
5 dict(type='LoadImageFromFile'),
6 dict(type='LoadAnnotations', with_bbox=True),
7 dict(
8 type='Resize',
9 img_scale=[(1333, 640), (1333, 800)],
10 multiscale_mode='value',
11 keep_ratio=True),
12 dict(type='RandomFlip', flip_ratio=0.5),
13 dict(type='Normalize', **img_norm_cfg),
14 dict(type='Pad', size_divisor=32),
15 dict(type='DefaultFormatBundle'),
16 dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
17 ]
18 test_pipeline = [
19 dict(type='LoadImageFromFile'),
20 dict(
21 type='MultiScaleFlipAug',
22 img_scale=(1333, 800),
23 flip=False,
24 transforms=[
25 dict(type='Resize', keep_ratio=True),
26 dict(type='RandomFlip'),
27 dict(type='Normalize', **img_norm_cfg),
28 dict(type='Pad', size_divisor=32),
29 dict(type='ImageToTensor', keys=['img']),
30 dict(type='Collect', keys=['img']),
31 ])
32 ]
33 data = dict(
34 train=dict(pipeline=train_pipeline),
35 val=dict(pipeline=test_pipeline),
36 test=dict(pipeline=test_pipeline))
37 # learning policy
38 lr_config = dict(step=[16, 22])
39 total_epochs = 24
40
[end of configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py]
[start of configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py]
1 _base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
2 model = dict(
3 pretrained='open-mmlab://msra/hrnetv2_w32',
4 backbone=dict(
5 _delete_=True,
6 type='HRNet',
7 extra=dict(
8 stage1=dict(
9 num_modules=1,
10 num_branches=1,
11 block='BOTTLENECK',
12 num_blocks=(4, ),
13 num_channels=(64, )),
14 stage2=dict(
15 num_modules=1,
16 num_branches=2,
17 block='BASIC',
18 num_blocks=(4, 4),
19 num_channels=(32, 64)),
20 stage3=dict(
21 num_modules=4,
22 num_branches=3,
23 block='BASIC',
24 num_blocks=(4, 4, 4),
25 num_channels=(32, 64, 128)),
26 stage4=dict(
27 num_modules=3,
28 num_branches=4,
29 block='BASIC',
30 num_blocks=(4, 4, 4, 4),
31 num_channels=(32, 64, 128, 256)))),
32 neck=dict(
33 _delete_=True,
34 type='HRFPN',
35 in_channels=[32, 64, 128, 256],
36 out_channels=256,
37 stride=2,
38 num_outs=5))
39
[end of configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
@@ -36,3 +36,34 @@
out_channels=256,
stride=2,
num_outs=5))
+img_norm_cfg = dict(
+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
@@ -1,6 +1,6 @@
_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'
img_norm_cfg = dict(
- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
| {"golden_diff": "diff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py\n@@ -36,3 +36,34 @@\n out_channels=256,\n stride=2,\n num_outs=5))\n+img_norm_cfg = dict(\n+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\n+train_pipeline = [\n+ dict(type='LoadImageFromFile'),\n+ dict(type='LoadAnnotations', with_bbox=True),\n+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n+ dict(type='RandomFlip', flip_ratio=0.5),\n+ dict(type='Normalize', **img_norm_cfg),\n+ dict(type='Pad', size_divisor=32),\n+ dict(type='DefaultFormatBundle'),\n+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n+]\n+test_pipeline = [\n+ dict(type='LoadImageFromFile'),\n+ dict(\n+ type='MultiScaleFlipAug',\n+ img_scale=(1333, 800),\n+ flip=False,\n+ transforms=[\n+ dict(type='Resize', keep_ratio=True),\n+ dict(type='RandomFlip'),\n+ dict(type='Normalize', **img_norm_cfg),\n+ dict(type='Pad', size_divisor=32),\n+ dict(type='ImageToTensor', keys=['img']),\n+ dict(type='Collect', keys=['img']),\n+ ])\n+]\n+data = dict(\n+ train=dict(pipeline=train_pipeline),\n+ val=dict(pipeline=test_pipeline),\n+ test=dict(pipeline=test_pipeline))\ndiff --git a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n--- a/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n+++ b/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py\n@@ -1,6 +1,6 @@\n _base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'\n img_norm_cfg = dict(\n- mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n+ mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)\n train_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n", "issue": "Different mean values of image normalization for HRNet\nI find the different configs of HRNet experiments use different mean values for image normalization.\r\n\r\nFor example,\r\n[fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py) uses the `[102.9801, 115.9465, 122.7717]` as the mean value,\r\nwhile [fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py) uses the `[103.530, 116.280, 123.675]` as the mean value.\r\n\r\nWhich one is correct?\n", "before_files": [{"content": "_base_ = './fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py'\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(\n type='Resize',\n img_scale=[(1333, 640), (1333, 800)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(1333, 800),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n 
dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img']),\n ])\n]\ndata = dict(\n train=dict(pipeline=train_pipeline),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))\n# learning policy\nlr_config = dict(step=[16, 22])\ntotal_epochs = 24\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py"}, {"content": "_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'\nmodel = dict(\n pretrained='open-mmlab://msra/hrnetv2_w32',\n backbone=dict(\n _delete_=True,\n type='HRNet',\n extra=dict(\n stage1=dict(\n num_modules=1,\n num_branches=1,\n block='BOTTLENECK',\n num_blocks=(4, ),\n num_channels=(64, )),\n stage2=dict(\n num_modules=1,\n num_branches=2,\n block='BASIC',\n num_blocks=(4, 4),\n num_channels=(32, 64)),\n stage3=dict(\n num_modules=4,\n num_branches=3,\n block='BASIC',\n num_blocks=(4, 4, 4),\n num_channels=(32, 64, 128)),\n stage4=dict(\n num_modules=3,\n num_branches=4,\n block='BASIC',\n num_blocks=(4, 4, 4, 4),\n num_channels=(32, 64, 128, 256)))),\n neck=dict(\n _delete_=True,\n type='HRFPN',\n in_channels=[32, 64, 128, 256],\n out_channels=256,\n stride=2,\n num_outs=5))\n", "path": "configs/hrnet/fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py"}]} | 1,767 | 829 |
gh_patches_debug_45268 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-1001 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Webhook missing: invoice.payment_action_required
I can't find any reference to ```invoice.payment_action_required``` anywhere; however, this is a fairly critical piece of the off-session SCA workflow. Is it simply a case of adding it to the signals list?
https://stripe.com/docs/api/events/types#event_types-invoice.payment_action_required
</issue>
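Editorial sketch, not the project's confirmed fix: if the answer is indeed just registering the missing event type, the change would amount to adding the string to the list from which `WEBHOOK_SIGNALS` is built. The abbreviated comprehension below shows only a handful of the `invoice.*` hooks for illustration.
```python
from django.dispatch import Signal

# Abbreviated sketch: in djstripe/signals.py the full event-type list is much
# longer; the point is only that "invoice.payment_action_required" would be
# one more entry alongside the existing invoice hooks.
_hooks = [
    "invoice.marked_uncollectible",
    "invoice.payment_action_required",  # the event type the issue asks about
    "invoice.payment_failed",
    "invoice.payment_succeeded",
]
WEBHOOK_SIGNALS = {hook: Signal(providing_args=["event"]) for hook in _hooks}
```
Whether the event type would also need to be registered anywhere else in dj-stripe (for example, in webhook validation settings) is not shown by the issue or the listed file, so that is left open.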
<code>
[start of djstripe/signals.py]
1 """
2 signals are sent for each event Stripe sends to the app
3
4 Stripe docs for Webhooks: https://stripe.com/docs/webhooks
5 """
6 from django.db.models.signals import pre_delete
7 from django.dispatch import Signal, receiver
8
9 from . import settings as djstripe_settings
10
11 webhook_processing_error = Signal(providing_args=["data", "exception"])
12
13 # A signal for each Event type. See https://stripe.com/docs/api/events/types
14
15 WEBHOOK_SIGNALS = dict(
16 [
17 (hook, Signal(providing_args=["event"]))
18 for hook in [
19 "account.updated",
20 "account.application.authorized",
21 "account.application.deauthorized",
22 "account.external_account.created",
23 "account.external_account.deleted",
24 "account.external_account.updated",
25 "application_fee.created",
26 "application_fee.refunded",
27 "application_fee.refund.updated",
28 "balance.available",
29 "charge.captured",
30 "charge.expired",
31 "charge.failed",
32 "charge.pending",
33 "charge.refunded",
34 "charge.succeeded",
35 "charge.updated",
36 "charge.dispute.closed",
37 "charge.dispute.created",
38 "charge.dispute.funds_reinstated",
39 "charge.dispute.funds_withdrawn",
40 "charge.dispute.updated",
41 "charge.refund.updated",
42 "checkout.session.completed",
43 "coupon.created",
44 "coupon.deleted",
45 "coupon.updated",
46 "customer.created",
47 "customer.deleted",
48 "customer.updated",
49 "customer.discount.created",
50 "customer.discount.deleted",
51 "customer.discount.updated",
52 "customer.source.created",
53 "customer.source.deleted",
54 "customer.source.expiring",
55 "customer.source.updated",
56 "customer.subscription.created",
57 "customer.subscription.deleted",
58 "customer.subscription.trial_will_end",
59 "customer.subscription.updated",
60 "file.created",
61 "invoice.created",
62 "invoice.deleted",
63 "invoice.finalized",
64 "invoice.marked_uncollectible",
65 "invoice.payment_failed",
66 "invoice.payment_succeeded",
67 "invoice.sent",
68 "invoice.upcoming",
69 "invoice.updated",
70 "invoice.voided",
71 "invoiceitem.created",
72 "invoiceitem.deleted",
73 "invoiceitem.updated",
74 "issuing_authorization.created",
75 "issuing_authorization.request",
76 "issuing_authorization.updated",
77 "issuing_card.created",
78 "issuing_card.updated",
79 "issuing_cardholder.created",
80 "issuing_cardholder.updated",
81 "issuing_dispute.created",
82 "issuing_dispute.updated",
83 "issuing_settlement.created",
84 "issuing_settlement.updated",
85 "issuing_transaction.created",
86 "issuing_transaction.updated",
87 "order.created",
88 "order.payment_failed",
89 "order.payment_succeeded",
90 "order.updated",
91 "order_return.created",
92 "payment_intent.amount_capturable_updated",
93 "payment_intent.created",
94 "payment_intent.payment_failed",
95 "payment_intent.succeeded",
96 "payment_method.attached",
97 "payment_method.card_automatically_updated",
98 "payment_method.detached",
99 "payment_method.updated",
100 "payout.canceled",
101 "payout.created",
102 "payout.failed",
103 "payout.paid",
104 "payout.updated",
105 "plan.created",
106 "plan.deleted",
107 "plan.updated",
108 "product.created",
109 "product.deleted",
110 "product.updated",
111 "recipient.created",
112 "recipient.deleted",
113 "recipient.updated",
114 "reporting.report_run.failed",
115 "reporting.report_run.succeeded",
116 "reporting.report_type.updated",
117 "review.closed",
118 "review.opened",
119 "setup_intent.created",
120 "setup_intent.setup_failed",
121 "setup_intent.succeeded",
122 "sigma.scheduled_query_run.created",
123 "sku.created",
124 "sku.deleted",
125 "sku.updated",
126 "source.canceled",
127 "source.chargeable",
128 "source.failed",
129 "source.mandate_notification",
130 "source.refund_attributes_required",
131 "source.transaction.created",
132 "source.transaction.updated",
133 "topup.canceled",
134 "topup.created",
135 "topup.failed",
136 "topup.reversed",
137 "topup.succeeded",
138 "transfer.created",
139 "transfer.reversed",
140 "transfer.updated",
141 # deprecated (no longer in events_types list) - TODO can be deleted?
142 "checkout_beta.session_succeeded",
143 "issuer_fraud_record.created",
144 "payment_intent.requires_capture",
145 "subscription_schedule.canceled",
146 "subscription_schedule.completed",
147 "subscription_schedule.created",
148 "subscription_schedule.released",
149 "subscription_schedule.updated",
150 # special case? - TODO can be deleted?
151 "ping",
152 ]
153 ]
154 )
155
156
157 @receiver(pre_delete, sender=djstripe_settings.get_subscriber_model_string())
158 def on_delete_subscriber_purge_customer(instance=None, **kwargs):
159 """ Purge associated customers when the subscriber is deleted. """
160 for customer in instance.djstripe_customers.all():
161 customer.purge()
162
[end of djstripe/signals.py]
</code>
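For readers unfamiliar with how the `WEBHOOK_SIGNALS` dict above is consumed, here is a hedged sketch of a downstream receiver; the handler name and the fields read from `event` are assumptions, not taken from the listed module.
```python
from django.dispatch import receiver
from djstripe.signals import WEBHOOK_SIGNALS


# Hypothetical consumer: subscribe to one of the per-event-type signals.
@receiver(WEBHOOK_SIGNALS["invoice.payment_failed"])
def handle_failed_invoice(sender, event, **kwargs):
    # `event` is expected to be the dj-stripe Event instance for the webhook
    # that fired; the exact attributes available depend on the dj-stripe version.
    print("invoice payment failed:", getattr(event, "id", None))
```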
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/djstripe/signals.py b/djstripe/signals.py
--- a/djstripe/signals.py
+++ b/djstripe/signals.py
@@ -16,36 +16,41 @@
[
(hook, Signal(providing_args=["event"]))
for hook in [
- "account.updated",
+ # Update this by copy-pasting the "enabled_events" enum values from
+ # https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json
"account.application.authorized",
"account.application.deauthorized",
"account.external_account.created",
"account.external_account.deleted",
"account.external_account.updated",
+ "account.updated",
"application_fee.created",
- "application_fee.refunded",
"application_fee.refund.updated",
+ "application_fee.refunded",
"balance.available",
+ "capability.updated",
"charge.captured",
- "charge.expired",
- "charge.failed",
- "charge.pending",
- "charge.refunded",
- "charge.succeeded",
- "charge.updated",
"charge.dispute.closed",
"charge.dispute.created",
"charge.dispute.funds_reinstated",
"charge.dispute.funds_withdrawn",
"charge.dispute.updated",
+ "charge.expired",
+ "charge.failed",
+ "charge.pending",
"charge.refund.updated",
+ "charge.refunded",
+ "charge.succeeded",
+ "charge.updated",
"checkout.session.completed",
"coupon.created",
"coupon.deleted",
"coupon.updated",
+ "credit_note.created",
+ "credit_note.updated",
+ "credit_note.voided",
"customer.created",
"customer.deleted",
- "customer.updated",
"customer.discount.created",
"customer.discount.deleted",
"customer.discount.updated",
@@ -57,11 +62,16 @@
"customer.subscription.deleted",
"customer.subscription.trial_will_end",
"customer.subscription.updated",
+ "customer.tax_id.created",
+ "customer.tax_id.deleted",
+ "customer.tax_id.updated",
+ "customer.updated",
"file.created",
"invoice.created",
"invoice.deleted",
"invoice.finalized",
"invoice.marked_uncollectible",
+ "invoice.payment_action_required",
"invoice.payment_failed",
"invoice.payment_succeeded",
"invoice.sent",
@@ -102,12 +112,17 @@
"payout.failed",
"payout.paid",
"payout.updated",
+ "person.created",
+ "person.deleted",
+ "person.updated",
"plan.created",
"plan.deleted",
"plan.updated",
"product.created",
"product.deleted",
"product.updated",
+ "radar.early_fraud_warning.created",
+ "radar.early_fraud_warning.updated",
"recipient.created",
"recipient.deleted",
"recipient.updated",
@@ -130,12 +145,23 @@
"source.refund_attributes_required",
"source.transaction.created",
"source.transaction.updated",
+ "subscription_schedule.aborted",
+ "subscription_schedule.canceled",
+ "subscription_schedule.completed",
+ "subscription_schedule.created",
+ "subscription_schedule.expiring",
+ "subscription_schedule.released",
+ "subscription_schedule.updated",
+ "tax_rate.created",
+ "tax_rate.updated",
"topup.canceled",
"topup.created",
"topup.failed",
"topup.reversed",
"topup.succeeded",
"transfer.created",
+ "transfer.failed",
+ "transfer.paid",
"transfer.reversed",
"transfer.updated",
# deprecated (no longer in events_types list) - TODO can be deleted?
| {"golden_diff": "diff --git a/djstripe/signals.py b/djstripe/signals.py\n--- a/djstripe/signals.py\n+++ b/djstripe/signals.py\n@@ -16,36 +16,41 @@\n [\n (hook, Signal(providing_args=[\"event\"]))\n for hook in [\n- \"account.updated\",\n+ # Update this by copy-pasting the \"enabled_events\" enum values from\n+ # https://raw.githubusercontent.com/stripe/openapi/master/openapi/spec3.json\n \"account.application.authorized\",\n \"account.application.deauthorized\",\n \"account.external_account.created\",\n \"account.external_account.deleted\",\n \"account.external_account.updated\",\n+ \"account.updated\",\n \"application_fee.created\",\n- \"application_fee.refunded\",\n \"application_fee.refund.updated\",\n+ \"application_fee.refunded\",\n \"balance.available\",\n+ \"capability.updated\",\n \"charge.captured\",\n- \"charge.expired\",\n- \"charge.failed\",\n- \"charge.pending\",\n- \"charge.refunded\",\n- \"charge.succeeded\",\n- \"charge.updated\",\n \"charge.dispute.closed\",\n \"charge.dispute.created\",\n \"charge.dispute.funds_reinstated\",\n \"charge.dispute.funds_withdrawn\",\n \"charge.dispute.updated\",\n+ \"charge.expired\",\n+ \"charge.failed\",\n+ \"charge.pending\",\n \"charge.refund.updated\",\n+ \"charge.refunded\",\n+ \"charge.succeeded\",\n+ \"charge.updated\",\n \"checkout.session.completed\",\n \"coupon.created\",\n \"coupon.deleted\",\n \"coupon.updated\",\n+ \"credit_note.created\",\n+ \"credit_note.updated\",\n+ \"credit_note.voided\",\n \"customer.created\",\n \"customer.deleted\",\n- \"customer.updated\",\n \"customer.discount.created\",\n \"customer.discount.deleted\",\n \"customer.discount.updated\",\n@@ -57,11 +62,16 @@\n \"customer.subscription.deleted\",\n \"customer.subscription.trial_will_end\",\n \"customer.subscription.updated\",\n+ \"customer.tax_id.created\",\n+ \"customer.tax_id.deleted\",\n+ \"customer.tax_id.updated\",\n+ \"customer.updated\",\n \"file.created\",\n \"invoice.created\",\n \"invoice.deleted\",\n \"invoice.finalized\",\n \"invoice.marked_uncollectible\",\n+ \"invoice.payment_action_required\",\n \"invoice.payment_failed\",\n \"invoice.payment_succeeded\",\n \"invoice.sent\",\n@@ -102,12 +112,17 @@\n \"payout.failed\",\n \"payout.paid\",\n \"payout.updated\",\n+ \"person.created\",\n+ \"person.deleted\",\n+ \"person.updated\",\n \"plan.created\",\n \"plan.deleted\",\n \"plan.updated\",\n \"product.created\",\n \"product.deleted\",\n \"product.updated\",\n+ \"radar.early_fraud_warning.created\",\n+ \"radar.early_fraud_warning.updated\",\n \"recipient.created\",\n \"recipient.deleted\",\n \"recipient.updated\",\n@@ -130,12 +145,23 @@\n \"source.refund_attributes_required\",\n \"source.transaction.created\",\n \"source.transaction.updated\",\n+ \"subscription_schedule.aborted\",\n+ \"subscription_schedule.canceled\",\n+ \"subscription_schedule.completed\",\n+ \"subscription_schedule.created\",\n+ \"subscription_schedule.expiring\",\n+ \"subscription_schedule.released\",\n+ \"subscription_schedule.updated\",\n+ \"tax_rate.created\",\n+ \"tax_rate.updated\",\n \"topup.canceled\",\n \"topup.created\",\n \"topup.failed\",\n \"topup.reversed\",\n \"topup.succeeded\",\n \"transfer.created\",\n+ \"transfer.failed\",\n+ \"transfer.paid\",\n \"transfer.reversed\",\n \"transfer.updated\",\n # deprecated (no longer in events_types list) - TODO can be deleted?\n", "issue": "Webhook missing: invoice.payment_action_required\nI can't find any reference to ```invoice.payment_action_required``` anywhere, however this is a fairly critical piece of off-session 
SCA workflow. Is it simply a case of adding it to the signals list?\r\n\r\nhttps://stripe.com/docs/api/events/types#event_types-invoice.payment_action_required\n", "before_files": [{"content": "\"\"\"\nsignals are sent for each event Stripe sends to the app\n\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\"\"\"\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import Signal, receiver\n\nfrom . import settings as djstripe_settings\n\nwebhook_processing_error = Signal(providing_args=[\"data\", \"exception\"])\n\n# A signal for each Event type. See https://stripe.com/docs/api/events/types\n\nWEBHOOK_SIGNALS = dict(\n [\n (hook, Signal(providing_args=[\"event\"]))\n for hook in [\n \"account.updated\",\n \"account.application.authorized\",\n \"account.application.deauthorized\",\n \"account.external_account.created\",\n \"account.external_account.deleted\",\n \"account.external_account.updated\",\n \"application_fee.created\",\n \"application_fee.refunded\",\n \"application_fee.refund.updated\",\n \"balance.available\",\n \"charge.captured\",\n \"charge.expired\",\n \"charge.failed\",\n \"charge.pending\",\n \"charge.refunded\",\n \"charge.succeeded\",\n \"charge.updated\",\n \"charge.dispute.closed\",\n \"charge.dispute.created\",\n \"charge.dispute.funds_reinstated\",\n \"charge.dispute.funds_withdrawn\",\n \"charge.dispute.updated\",\n \"charge.refund.updated\",\n \"checkout.session.completed\",\n \"coupon.created\",\n \"coupon.deleted\",\n \"coupon.updated\",\n \"customer.created\",\n \"customer.deleted\",\n \"customer.updated\",\n \"customer.discount.created\",\n \"customer.discount.deleted\",\n \"customer.discount.updated\",\n \"customer.source.created\",\n \"customer.source.deleted\",\n \"customer.source.expiring\",\n \"customer.source.updated\",\n \"customer.subscription.created\",\n \"customer.subscription.deleted\",\n \"customer.subscription.trial_will_end\",\n \"customer.subscription.updated\",\n \"file.created\",\n \"invoice.created\",\n \"invoice.deleted\",\n \"invoice.finalized\",\n \"invoice.marked_uncollectible\",\n \"invoice.payment_failed\",\n \"invoice.payment_succeeded\",\n \"invoice.sent\",\n \"invoice.upcoming\",\n \"invoice.updated\",\n \"invoice.voided\",\n \"invoiceitem.created\",\n \"invoiceitem.deleted\",\n \"invoiceitem.updated\",\n \"issuing_authorization.created\",\n \"issuing_authorization.request\",\n \"issuing_authorization.updated\",\n \"issuing_card.created\",\n \"issuing_card.updated\",\n \"issuing_cardholder.created\",\n \"issuing_cardholder.updated\",\n \"issuing_dispute.created\",\n \"issuing_dispute.updated\",\n \"issuing_settlement.created\",\n \"issuing_settlement.updated\",\n \"issuing_transaction.created\",\n \"issuing_transaction.updated\",\n \"order.created\",\n \"order.payment_failed\",\n \"order.payment_succeeded\",\n \"order.updated\",\n \"order_return.created\",\n \"payment_intent.amount_capturable_updated\",\n \"payment_intent.created\",\n \"payment_intent.payment_failed\",\n \"payment_intent.succeeded\",\n \"payment_method.attached\",\n \"payment_method.card_automatically_updated\",\n \"payment_method.detached\",\n \"payment_method.updated\",\n \"payout.canceled\",\n \"payout.created\",\n \"payout.failed\",\n \"payout.paid\",\n \"payout.updated\",\n \"plan.created\",\n \"plan.deleted\",\n \"plan.updated\",\n \"product.created\",\n \"product.deleted\",\n \"product.updated\",\n \"recipient.created\",\n \"recipient.deleted\",\n \"recipient.updated\",\n \"reporting.report_run.failed\",\n 
\"reporting.report_run.succeeded\",\n \"reporting.report_type.updated\",\n \"review.closed\",\n \"review.opened\",\n \"setup_intent.created\",\n \"setup_intent.setup_failed\",\n \"setup_intent.succeeded\",\n \"sigma.scheduled_query_run.created\",\n \"sku.created\",\n \"sku.deleted\",\n \"sku.updated\",\n \"source.canceled\",\n \"source.chargeable\",\n \"source.failed\",\n \"source.mandate_notification\",\n \"source.refund_attributes_required\",\n \"source.transaction.created\",\n \"source.transaction.updated\",\n \"topup.canceled\",\n \"topup.created\",\n \"topup.failed\",\n \"topup.reversed\",\n \"topup.succeeded\",\n \"transfer.created\",\n \"transfer.reversed\",\n \"transfer.updated\",\n # deprecated (no longer in events_types list) - TODO can be deleted?\n \"checkout_beta.session_succeeded\",\n \"issuer_fraud_record.created\",\n \"payment_intent.requires_capture\",\n \"subscription_schedule.canceled\",\n \"subscription_schedule.completed\",\n \"subscription_schedule.created\",\n \"subscription_schedule.released\",\n \"subscription_schedule.updated\",\n # special case? - TODO can be deleted?\n \"ping\",\n ]\n ]\n)\n\n\n@receiver(pre_delete, sender=djstripe_settings.get_subscriber_model_string())\ndef on_delete_subscriber_purge_customer(instance=None, **kwargs):\n \"\"\" Purge associated customers when the subscriber is deleted. \"\"\"\n for customer in instance.djstripe_customers.all():\n customer.purge()\n", "path": "djstripe/signals.py"}]} | 2,041 | 832 |
gh_patches_debug_11607 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Question: Why is the default structuring element of 1-connectivity used by ndimage.label for getting the default markers (labeled local minima) for watershed segmentation?
## Description
In the implemented watershed.py segmentation, one can pass a connectivity argument. When `local_minima` is called by watershed (i.e. when `markers=None`), the same connectivity value is passed on to the local-minima function; this generates `markers_bool`, which is then used by `ndimage.label()` to generate the labeled markers. `ndimage.label` can take a structure argument, and it defaults to a 1-connectivity structure if none is given. IMO it is more intuitive to have `ndimage.label` also use the same connectivity that is passed by the user to the watershed function, instead of always using the default 1-connectivity.
Below is the line in the source I am referring to:
https://github.com/scikit-image/scikit-image/blob/663e9e4b0f2ba88249bfef45bc65700555a3f24b/skimage/segmentation/_watershed.py#L76
Instead of the above would it not be better to have?
```python
if connectivity==2:
markers = ndi.label(markers_bool,
structure = [[1,1,1],
[1,1,1],
[1,1,1]])[0]
```
</issue>
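Editorial aside: instead of hard-coding the 8-connected 2-D structure as in the snippet above, a dimension-agnostic sketch (an assumption about a cleaner fix, not necessarily what the maintainers chose) could derive the labeling structure from the same `connectivity` integer via `scipy.ndimage.generate_binary_structure`:
```python
from scipy import ndimage as ndi


def _label_markers(markers_bool, connectivity):
    # Build a structuring element with the same connectivity the caller passed
    # to watershed, valid for any number of image dimensions.
    footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)
    return ndi.label(markers_bool, structure=footprint)[0]
```
Note that this sketch only applies when `connectivity` is given as an integer; `watershed` also accepts an ndarray connectivity, which would need separate handling.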
<code>
[start of skimage/segmentation/_watershed.py]
1 """watershed.py - watershed algorithm
2
3 This module implements a watershed algorithm that apportions pixels into
4 marked basins. The algorithm uses a priority queue to hold the pixels
5 with the metric for the priority queue being pixel value, then the time
6 of entry into the queue - this settles ties in favor of the closest marker.
7
8 Some ideas taken from
9 Soille, "Automated Basin Delineation from Digital Elevation Models Using
10 Mathematical Morphology", Signal Processing 20 (1990) 171-182.
11
12 The most important insight in the paper is that entry time onto the queue
13 solves two problems: a pixel should be assigned to the neighbor with the
14 largest gradient or, if there is no gradient, pixels on a plateau should
15 be split between markers on opposite sides.
16
17 Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
18 Website: http://www.cellprofiler.org
19
20 Copyright (c) 2003-2009 Massachusetts Institute of Technology
21 Copyright (c) 2009-2011 Broad Institute
22 All rights reserved.
23
24 Original author: Lee Kamentsky
25 """
26
27 import numpy as np
28 from scipy import ndimage as ndi
29
30 from . import _watershed_cy
31 from ..morphology.extrema import local_minima
32 from ..morphology._util import (_validate_connectivity,
33 _offsets_to_raveled_neighbors)
34 from ..util import crop, regular_seeds
35
36
37 def _validate_inputs(image, markers, mask, connectivity):
38 """Ensure that all inputs to watershed have matching shapes and types.
39
40 Parameters
41 ----------
42 image : array
43 The input image.
44 markers : int or array of int
45 The marker image.
46 mask : array, or None
47 A boolean mask, True where we want to compute the watershed.
48 connectivity : int in {1, ..., image.ndim}
49 The connectivity of the neighborhood of a pixel.
50
51 Returns
52 -------
53 image, markers, mask : arrays
54 The validated and formatted arrays. Image will have dtype float64,
55 markers int32, and mask int8. If ``None`` was given for the mask,
56 it is a volume of all 1s.
57
58 Raises
59 ------
60 ValueError
61 If the shapes of the given arrays don't match.
62 """
63 n_pixels = image.size
64 if mask is None:
65 # Use a complete `True` mask if none is provided
66 mask = np.ones(image.shape, bool)
67 else:
68 mask = np.asanyarray(mask, dtype=bool)
69 n_pixels = np.sum(mask)
70 if mask.shape != image.shape:
71 message = ("`mask` (shape {}) must have same shape as "
72 "`image` (shape {})".format(mask.shape, image.shape))
73 raise ValueError(message)
74 if markers is None:
75 markers_bool = local_minima(image, connectivity=connectivity) * mask
76 markers = ndi.label(markers_bool)[0]
77 elif not isinstance(markers, (np.ndarray, list, tuple)):
78 # not array-like, assume int
79 # given int, assume that number of markers *within mask*.
80 markers = regular_seeds(image.shape,
81 int(markers / (n_pixels / image.size)))
82 markers *= mask
83 else:
84 markers = np.asanyarray(markers) * mask
85 if markers.shape != image.shape:
86 message = ("`markers` (shape {}) must have same shape as "
87 "`image` (shape {})".format(markers.shape, image.shape))
88 raise ValueError(message)
89 return (image.astype(np.float64),
90 markers.astype(np.int32),
91 mask.astype(np.int8))
92
93
94 def watershed(image, markers=None, connectivity=1, offset=None, mask=None,
95 compactness=0, watershed_line=False):
96 """Find watershed basins in `image` flooded from given `markers`.
97
98 Parameters
99 ----------
100 image : ndarray (2-D, 3-D, ...) of integers
101 Data array where the lowest value points are labeled first.
102 markers : int, or ndarray of int, same shape as `image`, optional
103 The desired number of markers, or an array marking the basins with the
104 values to be assigned in the label matrix. Zero means not a marker. If
105 ``None`` (no markers given), the local minima of the image are used as
106 markers.
107 connectivity : ndarray, optional
108 An array with the same number of dimensions as `image` whose
109 non-zero elements indicate neighbors for connection.
110 Following the scipy convention, default is a one-connected array of
111 the dimension of the image.
112 offset : array_like of shape image.ndim, optional
113 offset of the connectivity (one offset per dimension)
114 mask : ndarray of bools or 0s and 1s, optional
115 Array of same shape as `image`. Only points at which mask == True
116 will be labeled.
117 compactness : float, optional
118 Use compact watershed [3]_ with given compactness parameter.
119 Higher values result in more regularly-shaped watershed basins.
120 watershed_line : bool, optional
121 If watershed_line is True, a one-pixel wide line separates the regions
122 obtained by the watershed algorithm. The line has the label 0.
123
124 Returns
125 -------
126 out : ndarray
127 A labeled matrix of the same type and shape as markers
128
129 See Also
130 --------
131 skimage.segmentation.random_walker : random walker segmentation
132 A segmentation algorithm based on anisotropic diffusion, usually
133 slower than the watershed but with good results on noisy data and
134 boundaries with holes.
135
136 Notes
137 -----
138 This function implements a watershed algorithm [1]_ [2]_ that apportions
139 pixels into marked basins. The algorithm uses a priority queue to hold
140 the pixels with the metric for the priority queue being pixel value, then
141 the time of entry into the queue - this settles ties in favor of the
142 closest marker.
143
144 Some ideas taken from
145 Soille, "Automated Basin Delineation from Digital Elevation Models Using
146 Mathematical Morphology", Signal Processing 20 (1990) 171-182
147
148 The most important insight in the paper is that entry time onto the queue
149 solves two problems: a pixel should be assigned to the neighbor with the
150 largest gradient or, if there is no gradient, pixels on a plateau should
151 be split between markers on opposite sides.
152
153 This implementation converts all arguments to specific, lowest common
154 denominator types, then passes these to a C algorithm.
155
156 Markers can be determined manually, or automatically using for example
157 the local minima of the gradient of the image, or the local maxima of the
158 distance function to the background for separating overlapping objects
159 (see example).
160
161 References
162 ----------
163 .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29
164
165 .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
166
167 .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and
168 Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation
169 Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`
170 https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf
171
172 Examples
173 --------
174 The watershed algorithm is useful to separate overlapping objects.
175
176 We first generate an initial image with two overlapping circles:
177
178 >>> x, y = np.indices((80, 80))
179 >>> x1, y1, x2, y2 = 28, 28, 44, 52
180 >>> r1, r2 = 16, 20
181 >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
182 >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
183 >>> image = np.logical_or(mask_circle1, mask_circle2)
184
185 Next, we want to separate the two circles. We generate markers at the
186 maxima of the distance to the background:
187
188 >>> from scipy import ndimage as ndi
189 >>> distance = ndi.distance_transform_edt(image)
190 >>> from skimage.feature import peak_local_max
191 >>> local_maxi = peak_local_max(distance, labels=image,
192 ... footprint=np.ones((3, 3)),
193 ... indices=False)
194 >>> markers = ndi.label(local_maxi)[0]
195
196 Finally, we run the watershed on the image and markers:
197
198 >>> labels = watershed(-distance, markers, mask=image)
199
200 The algorithm works also for 3-D images, and can be used for example to
201 separate overlapping spheres.
202 """
203 image, markers, mask = _validate_inputs(image, markers, mask, connectivity)
204 connectivity, offset = _validate_connectivity(image.ndim, connectivity,
205 offset)
206
207 # pad the image, markers, and mask so that we can use the mask to
208 # keep from running off the edges
209 pad_width = [(p, p) for p in offset]
210 image = np.pad(image, pad_width, mode='constant')
211 mask = np.pad(mask, pad_width, mode='constant').ravel()
212 output = np.pad(markers, pad_width, mode='constant')
213
214 flat_neighborhood = _offsets_to_raveled_neighbors(
215 image.shape, connectivity, center=offset)
216 marker_locations = np.flatnonzero(output)
217 image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize
218
219 _watershed_cy.watershed_raveled(image.ravel(),
220 marker_locations, flat_neighborhood,
221 mask, image_strides, compactness,
222 output.ravel(),
223 watershed_line)
224
225 output = crop(output, pad_width, copy=True)
226
227 return output
228
[end of skimage/segmentation/_watershed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py
--- a/skimage/segmentation/_watershed.py
+++ b/skimage/segmentation/_watershed.py
@@ -73,7 +73,8 @@
raise ValueError(message)
if markers is None:
markers_bool = local_minima(image, connectivity=connectivity) * mask
- markers = ndi.label(markers_bool)[0]
+ footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)
+ markers = ndi.label(markers_bool, structure=footprint)[0]
elif not isinstance(markers, (np.ndarray, list, tuple)):
# not array-like, assume int
# given int, assume that number of markers *within mask*.
| {"golden_diff": "diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py\n--- a/skimage/segmentation/_watershed.py\n+++ b/skimage/segmentation/_watershed.py\n@@ -73,7 +73,8 @@\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n- markers = ndi.label(markers_bool)[0]\n+ footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)\n+ markers = ndi.label(markers_bool, structure=footprint)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n", "issue": "Question: Why is default structuring element of 1-connectivity used by ndimage.label for getting the default markers(labeled local minima) for watershed segmentation?\n## Description\r\n\r\nIn the implemented watershed.py segmentation, one can pass a connectivity argument, when `local_minima` is called by watershed (when `markers=none`) it passes the same connectivity value onto the local minima function, this generates the `markers_bool`, which then is used by `ndimage.label()` to generate the labeled markers. `ndimage.label` can take a structure argument and it defaults to 1-connectivity structure if none are given. IMO it is more intuitive to have `ndimage.label` also use the same connectivity which is passed by the user to the watershed function instead of always using the default 1-connectivity.\r\nBelow is the line in the source I am referring to:\r\nhttps://github.com/scikit-image/scikit-image/blob/663e9e4b0f2ba88249bfef45bc65700555a3f24b/skimage/segmentation/_watershed.py#L76\r\n\r\nInstead of the above would it not be better to have?\r\n```python\r\nif connectivity==2:\r\n markers = ndi.label(markers_bool,\r\n structure = [[1,1,1],\r\n [1,1,1],\r\n [1,1,1]])[0]\r\n```\r\n\n", "before_files": [{"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\nOriginally part of CellProfiler, code licensed under both GPL and BSD licenses.\nWebsite: http://www.cellprofiler.org\n\nCopyright (c) 2003-2009 Massachusetts Institute of Technology\nCopyright (c) 2009-2011 Broad Institute\nAll rights reserved.\n\nOriginal author: Lee Kamentsky\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . 
import _watershed_cy\nfrom ..morphology.extrema import local_minima\nfrom ..morphology._util import (_validate_connectivity,\n _offsets_to_raveled_neighbors)\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask, connectivity):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n connectivity : int in {1, ..., image.ndim}\n The connectivity of the neighborhood of a pixel.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n n_pixels = image.size\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n else:\n mask = np.asanyarray(mask, dtype=bool)\n n_pixels = np.sum(mask)\n if mask.shape != image.shape:\n message = (\"`mask` (shape {}) must have same shape as \"\n \"`image` (shape {})\".format(mask.shape, image.shape))\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n markers = ndi.label(markers_bool)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n markers = regular_seeds(image.shape,\n int(markers / (n_pixels / image.size)))\n markers *= mask\n else:\n markers = np.asanyarray(markers) * mask\n if markers.shape != image.shape:\n message = (\"`markers` (shape {}) must have same shape as \"\n \"`image` (shape {})\".format(markers.shape, image.shape))\n raise ValueError(message)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef watershed(image, markers=None, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image : ndarray (2-D, 3-D, ...) of integers\n Data array where the lowest value points are labeled first.\n markers : int, or ndarray of int, same shape as `image`, optional\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker. If\n ``None`` (no markers given), the local minima of the image are used as\n markers.\n connectivity : ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset : array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask : ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. 
The line has the label 0.\n\n Returns\n -------\n out : ndarray\n A labeled matrix of the same type and shape as markers\n\n See Also\n --------\n skimage.segmentation.random_walker : random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`\n https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> local_maxi = peak_local_max(distance, labels=image,\n ... footprint=np.ones((3, 3)),\n ... 
indices=False)\n >>> markers = ndi.label(local_maxi)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask, connectivity)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _offsets_to_raveled_neighbors(\n image.shape, connectivity, center=offset)\n marker_locations = np.flatnonzero(output)\n image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize\n\n _watershed_cy.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n", "path": "skimage/segmentation/_watershed.py"}]} | 3,622 | 178 |
gh_patches_debug_2800 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1534 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add API to programmatically create new requests.
Original discussion: https://discourse.mitmproxy.org/t/are-there-any-script-examples-for-creating-a-get-post-put/134
It would be great if we had a dedicated API for making new requests from scripts, e.g. `master.new_request()` or something along those lines. This would probably just call `create_request` and then `replay_request`, but it seems useful enough to spell out explicitly.
</issue>
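For orientation before the full module listing: the building blocks the issue refers to — `create_request()` and `replay_request()` on `FlowMaster` — already exist in the code below, so the proposed `master.new_request()` would essentially bundle the following manual two-step. This is an illustrative sketch, not confirmed API; `master` is assumed to be a running `FlowMaster` instance and the host/path values are placeholders.

```python
# Illustrative sketch using the existing FlowMaster helpers shown below.
flow = master.create_request("GET", "http", "example.com", 80, "/status")

# Replay the artificial request so a real response is fetched from the server.
# block=True waits for the replay thread; per the docstring below, doing this
# from the main thread would deadlock, so it belongs in a worker/script thread.
master.replay_request(flow, block=True)
```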
<code>
[start of mitmproxy/flow/master.py]
1 from __future__ import absolute_import, print_function, division
2
3 import os
4 import sys
5
6 from typing import Optional # noqa
7
8 import netlib.exceptions
9 from mitmproxy import controller
10 from mitmproxy import exceptions
11 from mitmproxy import models
12 from mitmproxy.flow import io
13 from mitmproxy.flow import modules
14 from mitmproxy.onboarding import app
15 from mitmproxy.protocol import http_replay
16
17
18 def event_sequence(f):
19 if isinstance(f, models.HTTPFlow):
20 if f.request:
21 yield "request", f
22 if f.response:
23 yield "responseheaders", f
24 yield "response", f
25 if f.error:
26 yield "error", f
27 elif isinstance(f, models.TCPFlow):
28 messages = f.messages
29 f.messages = []
30 f.reply = controller.DummyReply()
31 yield "tcp_open", f
32 while messages:
33 f.messages.append(messages.pop(0))
34 yield "tcp_message", f
35 if f.error:
36 yield "tcp_error", f
37 yield "tcp_close", f
38 else:
39 raise NotImplementedError
40
41
42 class FlowMaster(controller.Master):
43
44 @property
45 def server(self):
46 # At some point, we may want to have support for multiple servers.
47 # For now, this suffices.
48 if len(self.servers) > 0:
49 return self.servers[0]
50
51 def __init__(self, options, server, state):
52 super(FlowMaster, self).__init__(options)
53 if server:
54 self.add_server(server)
55 self.state = state
56 self.stream_large_bodies = None # type: Optional[modules.StreamLargeBodies]
57 self.apps = modules.AppRegistry()
58
59 def start_app(self, host, port):
60 self.apps.add(app.mapp, host, port)
61
62 def set_stream_large_bodies(self, max_size):
63 if max_size is not None:
64 self.stream_large_bodies = modules.StreamLargeBodies(max_size)
65 else:
66 self.stream_large_bodies = False
67
68 def duplicate_flow(self, f):
69 """
70 Duplicate flow, and insert it into state without triggering any of
71 the normal flow events.
72 """
73 f2 = f.copy()
74 self.state.add_flow(f2)
75 return f2
76
77 def create_request(self, method, scheme, host, port, path):
78 """
79 this method creates a new artificial and minimalist request also adds it to flowlist
80 """
81 c = models.ClientConnection.make_dummy(("", 0))
82 s = models.ServerConnection.make_dummy((host, port))
83
84 f = models.HTTPFlow(c, s)
85 headers = models.Headers()
86
87 req = models.HTTPRequest(
88 "absolute",
89 method,
90 scheme,
91 host,
92 port,
93 path,
94 b"HTTP/1.1",
95 headers,
96 b""
97 )
98 f.request = req
99 self.load_flow(f)
100 return f
101
102 def load_flow(self, f):
103 """
104 Loads a flow
105 """
106 if isinstance(f, models.HTTPFlow):
107 if self.server and self.options.mode == "reverse":
108 f.request.host = self.server.config.upstream_server.address.host
109 f.request.port = self.server.config.upstream_server.address.port
110 f.request.scheme = self.server.config.upstream_server.scheme
111 f.reply = controller.DummyReply()
112 for e, o in event_sequence(f):
113 getattr(self, e)(o)
114
115 def load_flows(self, fr):
116 """
117 Load flows from a FlowReader object.
118 """
119 cnt = 0
120 for i in fr.stream():
121 cnt += 1
122 self.load_flow(i)
123 return cnt
124
125 def load_flows_file(self, path):
126 path = os.path.expanduser(path)
127 try:
128 if path == "-":
129 # This is incompatible with Python 3 - maybe we can use click?
130 freader = io.FlowReader(sys.stdin)
131 return self.load_flows(freader)
132 else:
133 with open(path, "rb") as f:
134 freader = io.FlowReader(f)
135 return self.load_flows(freader)
136 except IOError as v:
137 raise exceptions.FlowReadException(v.strerror)
138
139 def replay_request(self, f, block=False):
140 """
141 Replay a HTTP request to receive a new response from the server.
142
143 Args:
144 f: The flow to replay.
145 block: If True, this function will wait for the replay to finish.
146 This causes a deadlock if activated in the main thread.
147
148 Returns:
149 The thread object doing the replay.
150
151 Raises:
152 exceptions.ReplayException, if the flow is in a state
153 where it is ineligible for replay.
154 """
155
156 if f.live:
157 raise exceptions.ReplayException(
158 "Can't replay live flow."
159 )
160 if f.intercepted:
161 raise exceptions.ReplayException(
162 "Can't replay intercepted flow."
163 )
164 if f.request.raw_content is None:
165 raise exceptions.ReplayException(
166 "Can't replay flow with missing content."
167 )
168 if not f.request:
169 raise exceptions.ReplayException(
170 "Can't replay flow with missing request."
171 )
172
173 f.backup()
174 f.request.is_replay = True
175
176 f.response = None
177 f.error = None
178
179 rt = http_replay.RequestReplayThread(
180 self.server.config,
181 f,
182 self.event_queue,
183 self.should_exit
184 )
185 rt.start() # pragma: no cover
186 if block:
187 rt.join()
188 return rt
189
190 @controller.handler
191 def log(self, l):
192 self.add_log(l.msg, l.level)
193
194 @controller.handler
195 def clientconnect(self, root_layer):
196 pass
197
198 @controller.handler
199 def clientdisconnect(self, root_layer):
200 pass
201
202 @controller.handler
203 def serverconnect(self, server_conn):
204 pass
205
206 @controller.handler
207 def serverdisconnect(self, server_conn):
208 pass
209
210 @controller.handler
211 def next_layer(self, top_layer):
212 pass
213
214 @controller.handler
215 def error(self, f):
216 self.state.update_flow(f)
217
218 @controller.handler
219 def request(self, f):
220 if f.live:
221 app = self.apps.get(f.request)
222 if app:
223 err = app.serve(
224 f,
225 f.client_conn.wfile,
226 **{"mitmproxy.master": self}
227 )
228 if err:
229 self.add_log("Error in wsgi app. %s" % err, "error")
230 f.reply.kill()
231 return
232 if f not in self.state.flows: # don't add again on replay
233 self.state.add_flow(f)
234
235 @controller.handler
236 def responseheaders(self, f):
237 try:
238 if self.stream_large_bodies:
239 self.stream_large_bodies.run(f, False)
240 except netlib.exceptions.HttpException:
241 f.reply.kill()
242 return
243
244 @controller.handler
245 def response(self, f):
246 self.state.update_flow(f)
247
248 @controller.handler
249 def websockets_handshake(self, f):
250 pass
251
252 def handle_intercept(self, f):
253 self.state.update_flow(f)
254
255 def handle_accept_intercept(self, f):
256 self.state.update_flow(f)
257
258 @controller.handler
259 def tcp_open(self, flow):
260 # TODO: This would break mitmproxy currently.
261 # self.state.add_flow(flow)
262 pass
263
264 @controller.handler
265 def tcp_message(self, flow):
266 pass
267
268 @controller.handler
269 def tcp_error(self, flow):
270 pass
271
272 @controller.handler
273 def tcp_close(self, flow):
274 pass
275
[end of mitmproxy/flow/master.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/flow/master.py b/mitmproxy/flow/master.py
--- a/mitmproxy/flow/master.py
+++ b/mitmproxy/flow/master.py
@@ -187,6 +187,10 @@
rt.join()
return rt
+ def new_request(self, method, scheme, host, port, path, block=False):
+ f = self.create_request(method, scheme, host, port, path)
+ return self.replay_request(f, block)
+
@controller.handler
def log(self, l):
self.add_log(l.msg, l.level)
| {"golden_diff": "diff --git a/mitmproxy/flow/master.py b/mitmproxy/flow/master.py\n--- a/mitmproxy/flow/master.py\n+++ b/mitmproxy/flow/master.py\n@@ -187,6 +187,10 @@\n rt.join()\n return rt\n \n+ def new_request(self, method, scheme, host, port, path, block=False):\n+ f = self.create_request(method, scheme, host, port, path)\n+ return self.replay_request(f, block)\n+\n @controller.handler\n def log(self, l):\n self.add_log(l.msg, l.level)\n", "issue": "Add API to programatically create new requests.\nOriginal discussion: https://discourse.mitmproxy.org/t/are-there-any-script-examples-for-creating-a-get-post-put/134\n\nIt would be great if we'd have a dedicated API to make new requests from scripts, e.g. `master.new_request()` or something along those lines. This would probably just call `new_request` and then `replay_request`, but this seems useful enough to spell out explicitly.\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\n\nimport os\nimport sys\n\nfrom typing import Optional # noqa\n\nimport netlib.exceptions\nfrom mitmproxy import controller\nfrom mitmproxy import exceptions\nfrom mitmproxy import models\nfrom mitmproxy.flow import io\nfrom mitmproxy.flow import modules\nfrom mitmproxy.onboarding import app\nfrom mitmproxy.protocol import http_replay\n\n\ndef event_sequence(f):\n if isinstance(f, models.HTTPFlow):\n if f.request:\n yield \"request\", f\n if f.response:\n yield \"responseheaders\", f\n yield \"response\", f\n if f.error:\n yield \"error\", f\n elif isinstance(f, models.TCPFlow):\n messages = f.messages\n f.messages = []\n f.reply = controller.DummyReply()\n yield \"tcp_open\", f\n while messages:\n f.messages.append(messages.pop(0))\n yield \"tcp_message\", f\n if f.error:\n yield \"tcp_error\", f\n yield \"tcp_close\", f\n else:\n raise NotImplementedError\n\n\nclass FlowMaster(controller.Master):\n\n @property\n def server(self):\n # At some point, we may want to have support for multiple servers.\n # For now, this suffices.\n if len(self.servers) > 0:\n return self.servers[0]\n\n def __init__(self, options, server, state):\n super(FlowMaster, self).__init__(options)\n if server:\n self.add_server(server)\n self.state = state\n self.stream_large_bodies = None # type: Optional[modules.StreamLargeBodies]\n self.apps = modules.AppRegistry()\n\n def start_app(self, host, port):\n self.apps.add(app.mapp, host, port)\n\n def set_stream_large_bodies(self, max_size):\n if max_size is not None:\n self.stream_large_bodies = modules.StreamLargeBodies(max_size)\n else:\n self.stream_large_bodies = False\n\n def duplicate_flow(self, f):\n \"\"\"\n Duplicate flow, and insert it into state without triggering any of\n the normal flow events.\n \"\"\"\n f2 = f.copy()\n self.state.add_flow(f2)\n return f2\n\n def create_request(self, method, scheme, host, port, path):\n \"\"\"\n this method creates a new artificial and minimalist request also adds it to flowlist\n \"\"\"\n c = models.ClientConnection.make_dummy((\"\", 0))\n s = models.ServerConnection.make_dummy((host, port))\n\n f = models.HTTPFlow(c, s)\n headers = models.Headers()\n\n req = models.HTTPRequest(\n \"absolute\",\n method,\n scheme,\n host,\n port,\n path,\n b\"HTTP/1.1\",\n headers,\n b\"\"\n )\n f.request = req\n self.load_flow(f)\n return f\n\n def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n if isinstance(f, models.HTTPFlow):\n if self.server and self.options.mode == \"reverse\":\n f.request.host = 
self.server.config.upstream_server.address.host\n f.request.port = self.server.config.upstream_server.address.port\n f.request.scheme = self.server.config.upstream_server.scheme\n f.reply = controller.DummyReply()\n for e, o in event_sequence(f):\n getattr(self, e)(o)\n\n def load_flows(self, fr):\n \"\"\"\n Load flows from a FlowReader object.\n \"\"\"\n cnt = 0\n for i in fr.stream():\n cnt += 1\n self.load_flow(i)\n return cnt\n\n def load_flows_file(self, path):\n path = os.path.expanduser(path)\n try:\n if path == \"-\":\n # This is incompatible with Python 3 - maybe we can use click?\n freader = io.FlowReader(sys.stdin)\n return self.load_flows(freader)\n else:\n with open(path, \"rb\") as f:\n freader = io.FlowReader(f)\n return self.load_flows(freader)\n except IOError as v:\n raise exceptions.FlowReadException(v.strerror)\n\n def replay_request(self, f, block=False):\n \"\"\"\n Replay a HTTP request to receive a new response from the server.\n\n Args:\n f: The flow to replay.\n block: If True, this function will wait for the replay to finish.\n This causes a deadlock if activated in the main thread.\n\n Returns:\n The thread object doing the replay.\n\n Raises:\n exceptions.ReplayException, if the flow is in a state\n where it is ineligible for replay.\n \"\"\"\n\n if f.live:\n raise exceptions.ReplayException(\n \"Can't replay live flow.\"\n )\n if f.intercepted:\n raise exceptions.ReplayException(\n \"Can't replay intercepted flow.\"\n )\n if f.request.raw_content is None:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing content.\"\n )\n if not f.request:\n raise exceptions.ReplayException(\n \"Can't replay flow with missing request.\"\n )\n\n f.backup()\n f.request.is_replay = True\n\n f.response = None\n f.error = None\n\n rt = http_replay.RequestReplayThread(\n self.server.config,\n f,\n self.event_queue,\n self.should_exit\n )\n rt.start() # pragma: no cover\n if block:\n rt.join()\n return rt\n\n @controller.handler\n def log(self, l):\n self.add_log(l.msg, l.level)\n\n @controller.handler\n def clientconnect(self, root_layer):\n pass\n\n @controller.handler\n def clientdisconnect(self, root_layer):\n pass\n\n @controller.handler\n def serverconnect(self, server_conn):\n pass\n\n @controller.handler\n def serverdisconnect(self, server_conn):\n pass\n\n @controller.handler\n def next_layer(self, top_layer):\n pass\n\n @controller.handler\n def error(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def request(self, f):\n if f.live:\n app = self.apps.get(f.request)\n if app:\n err = app.serve(\n f,\n f.client_conn.wfile,\n **{\"mitmproxy.master\": self}\n )\n if err:\n self.add_log(\"Error in wsgi app. 
%s\" % err, \"error\")\n f.reply.kill()\n return\n if f not in self.state.flows: # don't add again on replay\n self.state.add_flow(f)\n\n @controller.handler\n def responseheaders(self, f):\n try:\n if self.stream_large_bodies:\n self.stream_large_bodies.run(f, False)\n except netlib.exceptions.HttpException:\n f.reply.kill()\n return\n\n @controller.handler\n def response(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def websockets_handshake(self, f):\n pass\n\n def handle_intercept(self, f):\n self.state.update_flow(f)\n\n def handle_accept_intercept(self, f):\n self.state.update_flow(f)\n\n @controller.handler\n def tcp_open(self, flow):\n # TODO: This would break mitmproxy currently.\n # self.state.add_flow(flow)\n pass\n\n @controller.handler\n def tcp_message(self, flow):\n pass\n\n @controller.handler\n def tcp_error(self, flow):\n pass\n\n @controller.handler\n def tcp_close(self, flow):\n pass\n", "path": "mitmproxy/flow/master.py"}]} | 3,015 | 134 |
gh_patches_debug_22743 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-3109 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
With pydantic v2 a field marked as Optional[conint] throws an exception
## Description
I experienced this issue with `Optional[conint]`, but I suspect it will happen with any built-in pydantic field whose type ends up being an `Annotated` type. I was able to reproduce this in the existing tests within `tests/experimental/pydantic/test_fields.py`, test method `test_conint`: just replace `field: pydantic.conint(lt=100)` with `field: Optional[pydantic.conint(lt=100)]`. The exception is copied below in the additional context.
At least in my case I can work around this by just creating a validator for my field instead of using `conint`. I only have one field that uses it. However, it's definitely not ideal.
Example code below to see the error:
```python
class Model(pydantic.BaseModel):
field: Optional[pydantic.conint(lt=100)]
@strawberry.experimental.pydantic.type(model=Model)
class Type:
field: strawberry.auto
```
## System Information
- Operating system: Mac OS
- Strawberry version: Need to use a new enough version for pydantic v2 support.
## Additional Context
I'm guessing we need to add some additional logic inside `replace_types_recursively` that handles the `Annotated` type more gracefully. One interesting thing I noticed while debugging is that the `Annotated` type only shows up when the field is wrapped in `Optional`; if I use `conint` directly, the type comes through as a plain `int`. I don't have much experience with Python typing code, so I was hesitant to attempt a fix myself.
```
tests/experimental/pydantic/test_fields.py:92 (test_conint)
@needs_pydantic_v2
def test_conint():
class Model(pydantic.BaseModel):
field: Optional[pydantic.conint(lt=100)]
@strawberry.experimental.pydantic.type(Model)
> class Type:
test_fields.py:99:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../../strawberry/experimental/pydantic/object_type.py:183: in wrap
all_model_fields: List[DataclassCreationFields] = [
../../../strawberry/experimental/pydantic/object_type.py:184: in <listcomp>
_build_dataclass_creation_fields(
../../../strawberry/experimental/pydantic/object_type.py:67: in _build_dataclass_creation_fields
get_type_for_field(field, is_input)
../../../strawberry/experimental/pydantic/object_type.py:49: in get_type_for_field
replaced_type = replace_types_recursively(outer_type, is_input)
../../../strawberry/experimental/pydantic/fields.py:172: in replace_types_recursively
converted = tuple(
../../../strawberry/experimental/pydantic/fields.py:173: in <genexpr>
replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)
../../../strawberry/experimental/pydantic/fields.py:180: in replace_types_recursively
replaced_type = replaced_type.copy_with(converted)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = typing.Annotated[int, None, Interval(gt=None, ge=None, lt=100, le=None), None]
params = (<class 'int'>, None, Interval(gt=None, ge=None, lt=100, le=None), None)
def copy_with(self, params):
> assert len(params) == 1
E AssertionError
```
</issue>
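One way to read the traceback above: for `Optional[pydantic.conint(lt=100)]`, the argument inside the `Optional` is an `Annotated[int, ...]` carrying pydantic's constraint metadata, and calling `copy_with()` on it with every converted argument violates `Annotated`'s expectation of a single type parameter. A minimal sketch of the special-casing the reporter suggests — keeping only the first (real) type argument when the origin is `Annotated` — might look like the following; the helper name and its exact placement inside `replace_types_recursively` are assumptions for illustration.

```python
from typing import Any

from typing_extensions import Annotated, get_args, get_origin


def strip_constraint_metadata(type_: Any) -> Any:
    """Illustrative helper: reduce Annotated[int, <pydantic metadata>] to int."""
    if get_origin(type_) is Annotated:
        # get_args() returns (base_type, *metadata); only the base type matters
        # for the GraphQL conversion, the rest is pydantic validation metadata.
        return get_args(type_)[0]
    return type_
```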
<code>
[start of strawberry/experimental/pydantic/fields.py]
1 import builtins
2 from decimal import Decimal
3 from typing import Any, Dict, List, Optional, Type, Union
4 from uuid import UUID
5
6 import pydantic
7 from pydantic import BaseModel
8
9 from strawberry.experimental.pydantic._compat import (
10 IS_PYDANTIC_V1,
11 get_args,
12 get_origin,
13 is_new_type,
14 lenient_issubclass,
15 new_type_supertype,
16 )
17 from strawberry.experimental.pydantic.exceptions import (
18 UnregisteredTypeException,
19 UnsupportedTypeError,
20 )
21 from strawberry.types.types import StrawberryObjectDefinition
22
23 try:
24 from types import UnionType as TypingUnionType
25 except ImportError:
26 import sys
27
28 if sys.version_info < (3, 10):
29 TypingUnionType = ()
30 else:
31 raise
32
33 try:
34 from typing import GenericAlias as TypingGenericAlias # type: ignore
35 except ImportError:
36 import sys
37
38 # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
39 # we do this under a conditional to avoid a mypy :)
40 if sys.version_info < (3, 9):
41 TypingGenericAlias = ()
42 else:
43 raise
44
45 ATTR_TO_TYPE_MAP = {
46 "NoneStr": Optional[str],
47 "NoneBytes": Optional[bytes],
48 "StrBytes": None,
49 "NoneStrBytes": None,
50 "StrictStr": str,
51 "ConstrainedBytes": bytes,
52 "conbytes": bytes,
53 "ConstrainedStr": str,
54 "constr": str,
55 "EmailStr": str,
56 "PyObject": None,
57 "ConstrainedInt": int,
58 "conint": int,
59 "PositiveInt": int,
60 "NegativeInt": int,
61 "ConstrainedFloat": float,
62 "confloat": float,
63 "PositiveFloat": float,
64 "NegativeFloat": float,
65 "ConstrainedDecimal": Decimal,
66 "condecimal": Decimal,
67 "UUID1": UUID,
68 "UUID3": UUID,
69 "UUID4": UUID,
70 "UUID5": UUID,
71 "FilePath": None,
72 "DirectoryPath": None,
73 "Json": None,
74 "JsonWrapper": None,
75 "SecretStr": str,
76 "SecretBytes": bytes,
77 "StrictBool": bool,
78 "StrictInt": int,
79 "StrictFloat": float,
80 "PaymentCardNumber": None,
81 "ByteSize": None,
82 "AnyUrl": str,
83 "AnyHttpUrl": str,
84 "HttpUrl": str,
85 "PostgresDsn": str,
86 "RedisDsn": str,
87 }
88
89 ATTR_TO_TYPE_MAP_Pydantic_V2 = {
90 "EmailStr": str,
91 "SecretStr": str,
92 "SecretBytes": bytes,
93 "AnyUrl": str,
94 }
95
96 ATTR_TO_TYPE_MAP_Pydantic_Core_V2 = {
97 "MultiHostUrl": str,
98 }
99
100
101 def get_fields_map_for_v2() -> Dict[Any, Any]:
102 import pydantic_core
103
104 fields_map = {
105 getattr(pydantic, field_name): type
106 for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_V2.items()
107 if hasattr(pydantic, field_name)
108 }
109 fields_map.update(
110 {
111 getattr(pydantic_core, field_name): type
112 for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_Core_V2.items()
113 if hasattr(pydantic_core, field_name)
114 }
115 )
116
117 return fields_map
118
119
120 FIELDS_MAP = (
121 {
122 getattr(pydantic, field_name): type
123 for field_name, type in ATTR_TO_TYPE_MAP.items()
124 if hasattr(pydantic, field_name)
125 }
126 if IS_PYDANTIC_V1
127 else get_fields_map_for_v2()
128 )
129
130
131 def get_basic_type(type_: Any) -> Type[Any]:
132 if IS_PYDANTIC_V1:
133 # only pydantic v1 has these
134 if lenient_issubclass(type_, pydantic.ConstrainedInt):
135 return int
136 if lenient_issubclass(type_, pydantic.ConstrainedFloat):
137 return float
138 if lenient_issubclass(type_, pydantic.ConstrainedStr):
139 return str
140 if lenient_issubclass(type_, pydantic.ConstrainedList):
141 return List[get_basic_type(type_.item_type)] # type: ignore
142
143 if type_ in FIELDS_MAP:
144 type_ = FIELDS_MAP.get(type_)
145 if type_ is None:
146 raise UnsupportedTypeError()
147
148 if is_new_type(type_):
149 return new_type_supertype(type_)
150
151 return type_
152
153
154 def replace_pydantic_types(type_: Any, is_input: bool) -> Any:
155 if lenient_issubclass(type_, BaseModel):
156 attr = "_strawberry_input_type" if is_input else "_strawberry_type"
157 if hasattr(type_, attr):
158 return getattr(type_, attr)
159 else:
160 raise UnregisteredTypeException(type_)
161 return type_
162
163
164 def replace_types_recursively(type_: Any, is_input: bool) -> Any:
165 """Runs the conversions recursively into the arguments of generic types if any"""
166 basic_type = get_basic_type(type_)
167 replaced_type = replace_pydantic_types(basic_type, is_input)
168
169 origin = get_origin(type_)
170 if not origin or not hasattr(type_, "__args__"):
171 return replaced_type
172
173 converted = tuple(
174 replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)
175 )
176
177 if isinstance(replaced_type, TypingGenericAlias):
178 return TypingGenericAlias(origin, converted)
179 if isinstance(replaced_type, TypingUnionType):
180 return Union[converted]
181 replaced_type = replaced_type.copy_with(converted)
182
183 if isinstance(replaced_type, StrawberryObjectDefinition):
184 # TODO: Not sure if this is necessary. No coverage in tests
185 # TODO: Unnecessary with StrawberryObject
186 replaced_type = builtins.type(
187 replaced_type.name,
188 (),
189 {"__strawberry_definition__": replaced_type},
190 )
191
192 return replaced_type
193
[end of strawberry/experimental/pydantic/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py
--- a/strawberry/experimental/pydantic/fields.py
+++ b/strawberry/experimental/pydantic/fields.py
@@ -1,6 +1,7 @@
import builtins
from decimal import Decimal
from typing import Any, Dict, List, Optional, Type, Union
+from typing_extensions import Annotated
from uuid import UUID
import pydantic
@@ -42,6 +43,7 @@
else:
raise
+
ATTR_TO_TYPE_MAP = {
"NoneStr": Optional[str],
"NoneBytes": Optional[bytes],
@@ -178,6 +180,11 @@
return TypingGenericAlias(origin, converted)
if isinstance(replaced_type, TypingUnionType):
return Union[converted]
+
+ # TODO: investigate if we could move the check for annotated to the top
+ if origin is Annotated and converted:
+ converted = (converted[0],)
+
replaced_type = replaced_type.copy_with(converted)
if isinstance(replaced_type, StrawberryObjectDefinition):
| {"golden_diff": "diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py\n--- a/strawberry/experimental/pydantic/fields.py\n+++ b/strawberry/experimental/pydantic/fields.py\n@@ -1,6 +1,7 @@\n import builtins\n from decimal import Decimal\n from typing import Any, Dict, List, Optional, Type, Union\n+from typing_extensions import Annotated\n from uuid import UUID\n \n import pydantic\n@@ -42,6 +43,7 @@\n else:\n raise\n \n+\n ATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n@@ -178,6 +180,11 @@\n return TypingGenericAlias(origin, converted)\n if isinstance(replaced_type, TypingUnionType):\n return Union[converted]\n+\n+ # TODO: investigate if we could move the check for annotated to the top\n+ if origin is Annotated and converted:\n+ converted = (converted[0],)\n+\n replaced_type = replaced_type.copy_with(converted)\n \n if isinstance(replaced_type, StrawberryObjectDefinition):\n", "issue": "With pydantic v2 a field marked as Optional[conint] throws an exception\n## Description\r\n\r\nI experienced this issue with `Optional[conint]`, but I suspect this will happen with any builtin pydantic field whose type ends up being an `Annotated` type. I was able to reproduce this in the existing tests within `tests/experimental/pydantic/test_fields.py`, test method `test_conint`. Just replace `field: pydantic.conint(lt=100)` with `field: Optional[pydantic.conint(lt=100)]`. The exception is copied below in the additional context.\r\n\r\nAt least in my case I can work around this by just creating a validator for my field instead of using `conint`. I only have one field that uses it. However, it's definitely not ideal.\r\n\r\nExample code below to see the error:\r\n\r\n```python\r\nclass Model(pydantic.BaseModel):\r\n field: Optional[pydantic.conint(lt=100)]\r\n\r\[email protected](model=Model)\r\nclass Type:\r\n field: strawberry.auto\r\n```\r\n\r\n## System Information\r\n\r\n - Operating system: Mac OS\r\n - Strawberry version: Need to use a new enough version for pydantic v2 support.\r\n\r\n## Additional Context\r\n\r\nI'm guessing we need to add some additional logic inside `replace_types_recursively` that handles the `Annotated` type more gracefully? One thing that was interesting was when I was debugging, the `Annotated` type only comes in with `Optional`. If I just directly use `conint`, the type comes in as just an `int`. I don't have a lot of experience with python type code so I was afraid to try and put in a fix.\r\n\r\n<!-- Add any other relevant information about the problem here. 
-->\r\n```\r\ntests/experimental/pydantic/test_fields.py:92 (test_conint)\r\n@needs_pydantic_v2\r\n def test_conint():\r\n class Model(pydantic.BaseModel):\r\n field: Optional[pydantic.conint(lt=100)]\r\n \r\n @strawberry.experimental.pydantic.type(Model)\r\n> class Type:\r\n\r\ntest_fields.py:99: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n../../../strawberry/experimental/pydantic/object_type.py:183: in wrap\r\n all_model_fields: List[DataclassCreationFields] = [\r\n../../../strawberry/experimental/pydantic/object_type.py:184: in <listcomp>\r\n _build_dataclass_creation_fields(\r\n../../../strawberry/experimental/pydantic/object_type.py:67: in _build_dataclass_creation_fields\r\n get_type_for_field(field, is_input)\r\n../../../strawberry/experimental/pydantic/object_type.py:49: in get_type_for_field\r\n replaced_type = replace_types_recursively(outer_type, is_input)\r\n../../../strawberry/experimental/pydantic/fields.py:172: in replace_types_recursively\r\n converted = tuple(\r\n../../../strawberry/experimental/pydantic/fields.py:173: in <genexpr>\r\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\r\n../../../strawberry/experimental/pydantic/fields.py:180: in replace_types_recursively\r\n replaced_type = replaced_type.copy_with(converted)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nself = typing.Annotated[int, None, Interval(gt=None, ge=None, lt=100, le=None), None]\r\nparams = (<class 'int'>, None, Interval(gt=None, ge=None, lt=100, le=None), None)\r\n\r\n def copy_with(self, params):\r\n> assert len(params) == 1\r\nE AssertionError\r\n```\n\n<!-- POLAR PLEDGE BADGE START -->\n## Upvote & Fund\n\n- We're using [Polar.sh](https://polar.sh/strawberry-graphql) so you can upvote and help fund this issue.\n- We receive the funding once the issue is completed & confirmed by you.\n- Thank you in advance for helping prioritize & fund our backlog.\n\n<a href=\"https://polar.sh/strawberry-graphql/strawberry/issues/3107\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/3107/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/strawberry-graphql/strawberry/issues/3107/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "import builtins\nfrom decimal import Decimal\nfrom typing import Any, Dict, List, Optional, Type, Union\nfrom uuid import UUID\n\nimport pydantic\nfrom pydantic import BaseModel\n\nfrom strawberry.experimental.pydantic._compat import (\n IS_PYDANTIC_V1,\n get_args,\n get_origin,\n is_new_type,\n lenient_issubclass,\n new_type_supertype,\n)\nfrom strawberry.experimental.pydantic.exceptions import (\n UnregisteredTypeException,\n UnsupportedTypeError,\n)\nfrom strawberry.types.types import StrawberryObjectDefinition\n\ntry:\n from types import UnionType as TypingUnionType\nexcept ImportError:\n import sys\n\n if sys.version_info < (3, 10):\n TypingUnionType = ()\n else:\n raise\n\ntry:\n from typing import GenericAlias as TypingGenericAlias # type: ignore\nexcept ImportError:\n import sys\n\n # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] 
and so on)\n # we do this under a conditional to avoid a mypy :)\n if sys.version_info < (3, 9):\n TypingGenericAlias = ()\n else:\n raise\n\nATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n \"StrBytes\": None,\n \"NoneStrBytes\": None,\n \"StrictStr\": str,\n \"ConstrainedBytes\": bytes,\n \"conbytes\": bytes,\n \"ConstrainedStr\": str,\n \"constr\": str,\n \"EmailStr\": str,\n \"PyObject\": None,\n \"ConstrainedInt\": int,\n \"conint\": int,\n \"PositiveInt\": int,\n \"NegativeInt\": int,\n \"ConstrainedFloat\": float,\n \"confloat\": float,\n \"PositiveFloat\": float,\n \"NegativeFloat\": float,\n \"ConstrainedDecimal\": Decimal,\n \"condecimal\": Decimal,\n \"UUID1\": UUID,\n \"UUID3\": UUID,\n \"UUID4\": UUID,\n \"UUID5\": UUID,\n \"FilePath\": None,\n \"DirectoryPath\": None,\n \"Json\": None,\n \"JsonWrapper\": None,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"StrictBool\": bool,\n \"StrictInt\": int,\n \"StrictFloat\": float,\n \"PaymentCardNumber\": None,\n \"ByteSize\": None,\n \"AnyUrl\": str,\n \"AnyHttpUrl\": str,\n \"HttpUrl\": str,\n \"PostgresDsn\": str,\n \"RedisDsn\": str,\n}\n\nATTR_TO_TYPE_MAP_Pydantic_V2 = {\n \"EmailStr\": str,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"AnyUrl\": str,\n}\n\nATTR_TO_TYPE_MAP_Pydantic_Core_V2 = {\n \"MultiHostUrl\": str,\n}\n\n\ndef get_fields_map_for_v2() -> Dict[Any, Any]:\n import pydantic_core\n\n fields_map = {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_V2.items()\n if hasattr(pydantic, field_name)\n }\n fields_map.update(\n {\n getattr(pydantic_core, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP_Pydantic_Core_V2.items()\n if hasattr(pydantic_core, field_name)\n }\n )\n\n return fields_map\n\n\nFIELDS_MAP = (\n {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP.items()\n if hasattr(pydantic, field_name)\n }\n if IS_PYDANTIC_V1\n else get_fields_map_for_v2()\n)\n\n\ndef get_basic_type(type_: Any) -> Type[Any]:\n if IS_PYDANTIC_V1:\n # only pydantic v1 has these\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n if lenient_issubclass(type_, pydantic.ConstrainedFloat):\n return float\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n return List[get_basic_type(type_.item_type)] # type: ignore\n\n if type_ in FIELDS_MAP:\n type_ = FIELDS_MAP.get(type_)\n if type_ is None:\n raise UnsupportedTypeError()\n\n if is_new_type(type_):\n return new_type_supertype(type_)\n\n return type_\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool) -> Any:\n if lenient_issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n return type_\n\n\ndef replace_types_recursively(type_: Any, is_input: bool) -> Any:\n \"\"\"Runs the conversions recursively into the arguments of generic types if any\"\"\"\n basic_type = get_basic_type(type_)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n origin = get_origin(type_)\n if not origin or not hasattr(type_, \"__args__\"):\n return replaced_type\n\n converted = tuple(\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\n )\n\n if isinstance(replaced_type, TypingGenericAlias):\n return TypingGenericAlias(origin, converted)\n if isinstance(replaced_type, 
TypingUnionType):\n return Union[converted]\n replaced_type = replaced_type.copy_with(converted)\n\n if isinstance(replaced_type, StrawberryObjectDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"__strawberry_definition__\": replaced_type},\n )\n\n return replaced_type\n", "path": "strawberry/experimental/pydantic/fields.py"}]} | 3,425 | 260 |
gh_patches_debug_35352 | rasdani/github-patches | git_diff | zulip__zulip-23388 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Save failed: Only 2 custom profile fields can be displayed in the profile summary.
I tried to set a custom profile field to be displayed in the profile summary. However, it fails with the error message "Save failed: Only 2 custom profile fields can be displayed in the profile summary." even though no custom profile field had been set to be displayed.
https://user-images.githubusercontent.com/2891235/198561250-59519f45-85a4-4490-8f72-9fae343930be.mov
Server: hosted Zulip server
```console
Zulip Server
Version 6.0-dev-2021-gd461383c1f
Forked from upstream at 6.0-dev-2009-g0affc7ac6f
```
Client: Zulip Desktop v5.9.3
</issue>
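A plausible reading of the report, given the view code below: `display_in_profile_summary_limit_reached()` filters `CustomProfileField` rows with `display_in_profile_summary=True` without scoping the query to the requesting user's realm, so on a multi-realm host (such as hosted Zulip) other organizations' fields can trip the limit. A realm-scoped variant might look roughly like the sketch below — the added `realm` parameter and how callers would pass `user_profile.realm` are assumptions for illustration, and `Realm` would need to be imported from `zerver.models` alongside the module's existing imports.

```python
def display_in_profile_summary_limit_reached(
    realm: Realm, profile_field_id: Optional[int] = None
) -> bool:
    # Count only this organization's fields, not every realm on the server.
    query = CustomProfileField.objects.filter(
        realm=realm, display_in_profile_summary=True
    )
    if profile_field_id is not None:
        query = query.exclude(id=profile_field_id)
    return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS
```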
<code>
[start of zerver/views/custom_profile_fields.py]
1 from typing import List, Optional, cast
2
3 import orjson
4 from django.core.exceptions import ValidationError
5 from django.db import IntegrityError
6 from django.http import HttpRequest, HttpResponse
7 from django.utils.translation import gettext as _
8
9 from zerver.actions.custom_profile_fields import (
10 check_remove_custom_profile_field_value,
11 do_remove_realm_custom_profile_field,
12 do_update_user_custom_profile_data_if_changed,
13 try_add_realm_custom_profile_field,
14 try_add_realm_default_custom_profile_field,
15 try_reorder_realm_custom_profile_fields,
16 try_update_realm_custom_profile_field,
17 )
18 from zerver.decorator import human_users_only, require_realm_admin
19 from zerver.lib.exceptions import JsonableError
20 from zerver.lib.external_accounts import validate_external_account_field_data
21 from zerver.lib.request import REQ, has_request_variables
22 from zerver.lib.response import json_success
23 from zerver.lib.types import ProfileDataElementUpdateDict, ProfileFieldData, Validator
24 from zerver.lib.users import validate_user_custom_profile_data
25 from zerver.lib.validator import (
26 check_bool,
27 check_capped_string,
28 check_dict,
29 check_dict_only,
30 check_int,
31 check_list,
32 check_string,
33 check_union,
34 validate_select_field_data,
35 )
36 from zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm
37
38
39 def list_realm_custom_profile_fields(
40 request: HttpRequest, user_profile: UserProfile
41 ) -> HttpResponse:
42 fields = custom_profile_fields_for_realm(user_profile.realm_id)
43 return json_success(request, data={"custom_fields": [f.as_dict() for f in fields]})
44
45
46 hint_validator = check_capped_string(CustomProfileField.HINT_MAX_LENGTH)
47 name_validator = check_capped_string(CustomProfileField.NAME_MAX_LENGTH)
48
49
50 def validate_field_name_and_hint(name: str, hint: str) -> None:
51 if not name.strip():
52 raise JsonableError(_("Label cannot be blank."))
53
54 try:
55 hint_validator("hint", hint)
56 name_validator("name", name)
57 except ValidationError as error:
58 raise JsonableError(error.message)
59
60
61 def validate_custom_field_data(field_type: int, field_data: ProfileFieldData) -> None:
62 try:
63 if field_type == CustomProfileField.SELECT:
64 # Choice type field must have at least have one choice
65 if len(field_data) < 1:
66 raise JsonableError(_("Field must have at least one choice."))
67 validate_select_field_data(field_data)
68 elif field_type == CustomProfileField.EXTERNAL_ACCOUNT:
69 validate_external_account_field_data(field_data)
70 except ValidationError as error:
71 raise JsonableError(error.message)
72
73
74 def validate_display_in_profile_summary_field(
75 field_type: int, display_in_profile_summary: bool
76 ) -> None:
77 if not display_in_profile_summary:
78 return
79
80 # The LONG_TEXT field type doesn't make sense visually for profile
81 # field summaries. The USER field type will require some further
82 # client support.
83 if field_type == CustomProfileField.LONG_TEXT or field_type == CustomProfileField.USER:
84 raise JsonableError(_("Field type not supported for display in profile summary."))
85
86
87 def is_default_external_field(field_type: int, field_data: ProfileFieldData) -> bool:
88 if field_type != CustomProfileField.EXTERNAL_ACCOUNT:
89 return False
90 if field_data["subtype"] == "custom":
91 return False
92 return True
93
94
95 def validate_custom_profile_field(
96 name: str,
97 hint: str,
98 field_type: int,
99 field_data: ProfileFieldData,
100 display_in_profile_summary: bool,
101 ) -> None:
102 # Validate field data
103 validate_custom_field_data(field_type, field_data)
104
105 if not is_default_external_field(field_type, field_data):
106 # If field is default external field then we will fetch all data
107 # from our default field dictionary, so no need to validate name or hint
108 # Validate field name, hint if not default external account field
109 validate_field_name_and_hint(name, hint)
110
111 field_types = [i[0] for i in CustomProfileField.FIELD_TYPE_CHOICES]
112 if field_type not in field_types:
113 raise JsonableError(_("Invalid field type."))
114
115 validate_display_in_profile_summary_field(field_type, display_in_profile_summary)
116
117
118 check_profile_field_data: Validator[ProfileFieldData] = check_dict(
119 value_validator=check_union([check_dict(value_validator=check_string), check_string])
120 )
121
122
123 def update_only_display_in_profile_summary(
124 requested_name: str,
125 requested_hint: str,
126 requested_field_data: ProfileFieldData,
127 existing_field: CustomProfileField,
128 ) -> bool:
129 if (
130 requested_name != existing_field.name
131 or requested_hint != existing_field.hint
132 or requested_field_data != orjson.loads(existing_field.field_data)
133 ):
134 return False
135 return True
136
137
138 def display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:
139 query = CustomProfileField.objects.filter(display_in_profile_summary=True)
140 if profile_field_id is not None:
141 query = query.exclude(id=profile_field_id)
142 return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS
143
144
145 @require_realm_admin
146 @has_request_variables
147 def create_realm_custom_profile_field(
148 request: HttpRequest,
149 user_profile: UserProfile,
150 name: str = REQ(default="", converter=lambda var_name, x: x.strip()),
151 hint: str = REQ(default=""),
152 field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),
153 field_type: int = REQ(json_validator=check_int),
154 display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),
155 ) -> HttpResponse:
156 if display_in_profile_summary and display_in_profile_summary_limit_reached():
157 raise JsonableError(
158 _("Only 2 custom profile fields can be displayed in the profile summary.")
159 )
160
161 validate_custom_profile_field(name, hint, field_type, field_data, display_in_profile_summary)
162 try:
163 if is_default_external_field(field_type, field_data):
164 field_subtype = field_data["subtype"]
165 assert isinstance(field_subtype, str)
166 field = try_add_realm_default_custom_profile_field(
167 realm=user_profile.realm,
168 field_subtype=field_subtype,
169 display_in_profile_summary=display_in_profile_summary,
170 )
171 return json_success(request, data={"id": field.id})
172 else:
173 field = try_add_realm_custom_profile_field(
174 realm=user_profile.realm,
175 name=name,
176 field_data=field_data,
177 field_type=field_type,
178 hint=hint,
179 display_in_profile_summary=display_in_profile_summary,
180 )
181 return json_success(request, data={"id": field.id})
182 except IntegrityError:
183 raise JsonableError(_("A field with that label already exists."))
184
185
186 @require_realm_admin
187 def delete_realm_custom_profile_field(
188 request: HttpRequest, user_profile: UserProfile, field_id: int
189 ) -> HttpResponse:
190 try:
191 field = CustomProfileField.objects.get(id=field_id)
192 except CustomProfileField.DoesNotExist:
193 raise JsonableError(_("Field id {id} not found.").format(id=field_id))
194
195 do_remove_realm_custom_profile_field(realm=user_profile.realm, field=field)
196 return json_success(request)
197
198
199 @require_realm_admin
200 @has_request_variables
201 def update_realm_custom_profile_field(
202 request: HttpRequest,
203 user_profile: UserProfile,
204 field_id: int,
205 name: str = REQ(default="", converter=lambda var_name, x: x.strip()),
206 hint: str = REQ(default=""),
207 field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),
208 display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),
209 ) -> HttpResponse:
210 realm = user_profile.realm
211 try:
212 field = CustomProfileField.objects.get(realm=realm, id=field_id)
213 except CustomProfileField.DoesNotExist:
214 raise JsonableError(_("Field id {id} not found.").format(id=field_id))
215
216 if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):
217 raise JsonableError(
218 _("Only 2 custom profile fields can be displayed in the profile summary.")
219 )
220
221 if field.field_type == CustomProfileField.EXTERNAL_ACCOUNT:
222 # HACK: Allow changing the display_in_profile_summary property
223 # of default external account types, but not any others.
224 #
225 # TODO: Make the name/hint/field_data parameters optional, and
226 # just require that None was passed for all of them for this case.
227 if is_default_external_field(
228 field.field_type, orjson.loads(field.field_data)
229 ) and not update_only_display_in_profile_summary(name, hint, field_data, field):
230 raise JsonableError(_("Default custom field cannot be updated."))
231
232 validate_custom_profile_field(
233 name, hint, field.field_type, field_data, display_in_profile_summary
234 )
235 try:
236 try_update_realm_custom_profile_field(
237 realm,
238 field,
239 name,
240 hint=hint,
241 field_data=field_data,
242 display_in_profile_summary=display_in_profile_summary,
243 )
244 except IntegrityError:
245 raise JsonableError(_("A field with that label already exists."))
246 return json_success(request)
247
248
249 @require_realm_admin
250 @has_request_variables
251 def reorder_realm_custom_profile_fields(
252 request: HttpRequest,
253 user_profile: UserProfile,
254 order: List[int] = REQ(json_validator=check_list(check_int)),
255 ) -> HttpResponse:
256 try_reorder_realm_custom_profile_fields(user_profile.realm, order)
257 return json_success(request)
258
259
260 @human_users_only
261 @has_request_variables
262 def remove_user_custom_profile_data(
263 request: HttpRequest,
264 user_profile: UserProfile,
265 data: List[int] = REQ(json_validator=check_list(check_int)),
266 ) -> HttpResponse:
267 for field_id in data:
268 check_remove_custom_profile_field_value(user_profile, field_id)
269 return json_success(request)
270
271
272 check_profile_data_element_update_dict = cast(
273 Validator[ProfileDataElementUpdateDict],
274 check_dict_only(
275 [
276 ("id", check_int),
277 ("value", check_union([check_string, check_list(check_int)])),
278 ]
279 ),
280 )
281
282
283 @human_users_only
284 @has_request_variables
285 def update_user_custom_profile_data(
286 request: HttpRequest,
287 user_profile: UserProfile,
288 data: List[ProfileDataElementUpdateDict] = REQ(
289 json_validator=check_list(
290 check_profile_data_element_update_dict,
291 )
292 ),
293 ) -> HttpResponse:
294
295 validate_user_custom_profile_data(user_profile.realm.id, data)
296 do_update_user_custom_profile_data_if_changed(user_profile, data)
297 # We need to call this explicitly otherwise constraints are not check
298 return json_success(request)
299
[end of zerver/views/custom_profile_fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/views/custom_profile_fields.py b/zerver/views/custom_profile_fields.py
--- a/zerver/views/custom_profile_fields.py
+++ b/zerver/views/custom_profile_fields.py
@@ -33,7 +33,7 @@
check_union,
validate_select_field_data,
)
-from zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm
+from zerver.models import CustomProfileField, Realm, UserProfile, custom_profile_fields_for_realm
def list_realm_custom_profile_fields(
@@ -135,8 +135,10 @@
return True
-def display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:
- query = CustomProfileField.objects.filter(display_in_profile_summary=True)
+def display_in_profile_summary_limit_reached(
+ realm: Realm, profile_field_id: Optional[int] = None
+) -> bool:
+ query = CustomProfileField.objects.filter(realm=realm, display_in_profile_summary=True)
if profile_field_id is not None:
query = query.exclude(id=profile_field_id)
return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS
@@ -153,7 +155,7 @@
field_type: int = REQ(json_validator=check_int),
display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),
) -> HttpResponse:
- if display_in_profile_summary and display_in_profile_summary_limit_reached():
+ if display_in_profile_summary and display_in_profile_summary_limit_reached(user_profile.realm):
raise JsonableError(
_("Only 2 custom profile fields can be displayed in the profile summary.")
)
@@ -213,7 +215,9 @@
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
- if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):
+ if display_in_profile_summary and display_in_profile_summary_limit_reached(
+ user_profile.realm, field.id
+ ):
raise JsonableError(
_("Only 2 custom profile fields can be displayed in the profile summary.")
)
| {"golden_diff": "diff --git a/zerver/views/custom_profile_fields.py b/zerver/views/custom_profile_fields.py\n--- a/zerver/views/custom_profile_fields.py\n+++ b/zerver/views/custom_profile_fields.py\n@@ -33,7 +33,7 @@\n check_union,\n validate_select_field_data,\n )\n-from zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm\n+from zerver.models import CustomProfileField, Realm, UserProfile, custom_profile_fields_for_realm\n \n \n def list_realm_custom_profile_fields(\n@@ -135,8 +135,10 @@\n return True\n \n \n-def display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:\n- query = CustomProfileField.objects.filter(display_in_profile_summary=True)\n+def display_in_profile_summary_limit_reached(\n+ realm: Realm, profile_field_id: Optional[int] = None\n+) -> bool:\n+ query = CustomProfileField.objects.filter(realm=realm, display_in_profile_summary=True)\n if profile_field_id is not None:\n query = query.exclude(id=profile_field_id)\n return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS\n@@ -153,7 +155,7 @@\n field_type: int = REQ(json_validator=check_int),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n ) -> HttpResponse:\n- if display_in_profile_summary and display_in_profile_summary_limit_reached():\n+ if display_in_profile_summary and display_in_profile_summary_limit_reached(user_profile.realm):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n@@ -213,7 +215,9 @@\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n \n- if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):\n+ if display_in_profile_summary and display_in_profile_summary_limit_reached(\n+ user_profile.realm, field.id\n+ ):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n", "issue": "Save failed: Only 2 custom profile fields can be displayed in the profile summary.\nI tried to set a custom profile field to be displayed in the profile summary. 
However, it fails with an error message \"Save failed: Only 2 custom profile fields can be displayed in the profile summary.\" even though no custom profile field has been set to displayed.\r\n\r\nhttps://user-images.githubusercontent.com/2891235/198561250-59519f45-85a4-4490-8f72-9fae343930be.mov\r\n\r\n\r\nServer: hosted Zulip server\r\n```console\r\nZulip Server\r\nVersion 6.0-dev-2021-gd461383c1f \r\nForked from upstream at 6.0-dev-2009-g0affc7ac6f\r\n```\r\n\r\nClient: Zulip Desktop v5.9.3\n", "before_files": [{"content": "from typing import List, Optional, cast\n\nimport orjson\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\n\nfrom zerver.actions.custom_profile_fields import (\n check_remove_custom_profile_field_value,\n do_remove_realm_custom_profile_field,\n do_update_user_custom_profile_data_if_changed,\n try_add_realm_custom_profile_field,\n try_add_realm_default_custom_profile_field,\n try_reorder_realm_custom_profile_fields,\n try_update_realm_custom_profile_field,\n)\nfrom zerver.decorator import human_users_only, require_realm_admin\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.external_accounts import validate_external_account_field_data\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.types import ProfileDataElementUpdateDict, ProfileFieldData, Validator\nfrom zerver.lib.users import validate_user_custom_profile_data\nfrom zerver.lib.validator import (\n check_bool,\n check_capped_string,\n check_dict,\n check_dict_only,\n check_int,\n check_list,\n check_string,\n check_union,\n validate_select_field_data,\n)\nfrom zerver.models import CustomProfileField, UserProfile, custom_profile_fields_for_realm\n\n\ndef list_realm_custom_profile_fields(\n request: HttpRequest, user_profile: UserProfile\n) -> HttpResponse:\n fields = custom_profile_fields_for_realm(user_profile.realm_id)\n return json_success(request, data={\"custom_fields\": [f.as_dict() for f in fields]})\n\n\nhint_validator = check_capped_string(CustomProfileField.HINT_MAX_LENGTH)\nname_validator = check_capped_string(CustomProfileField.NAME_MAX_LENGTH)\n\n\ndef validate_field_name_and_hint(name: str, hint: str) -> None:\n if not name.strip():\n raise JsonableError(_(\"Label cannot be blank.\"))\n\n try:\n hint_validator(\"hint\", hint)\n name_validator(\"name\", name)\n except ValidationError as error:\n raise JsonableError(error.message)\n\n\ndef validate_custom_field_data(field_type: int, field_data: ProfileFieldData) -> None:\n try:\n if field_type == CustomProfileField.SELECT:\n # Choice type field must have at least have one choice\n if len(field_data) < 1:\n raise JsonableError(_(\"Field must have at least one choice.\"))\n validate_select_field_data(field_data)\n elif field_type == CustomProfileField.EXTERNAL_ACCOUNT:\n validate_external_account_field_data(field_data)\n except ValidationError as error:\n raise JsonableError(error.message)\n\n\ndef validate_display_in_profile_summary_field(\n field_type: int, display_in_profile_summary: bool\n) -> None:\n if not display_in_profile_summary:\n return\n\n # The LONG_TEXT field type doesn't make sense visually for profile\n # field summaries. 
The USER field type will require some further\n # client support.\n if field_type == CustomProfileField.LONG_TEXT or field_type == CustomProfileField.USER:\n raise JsonableError(_(\"Field type not supported for display in profile summary.\"))\n\n\ndef is_default_external_field(field_type: int, field_data: ProfileFieldData) -> bool:\n if field_type != CustomProfileField.EXTERNAL_ACCOUNT:\n return False\n if field_data[\"subtype\"] == \"custom\":\n return False\n return True\n\n\ndef validate_custom_profile_field(\n name: str,\n hint: str,\n field_type: int,\n field_data: ProfileFieldData,\n display_in_profile_summary: bool,\n) -> None:\n # Validate field data\n validate_custom_field_data(field_type, field_data)\n\n if not is_default_external_field(field_type, field_data):\n # If field is default external field then we will fetch all data\n # from our default field dictionary, so no need to validate name or hint\n # Validate field name, hint if not default external account field\n validate_field_name_and_hint(name, hint)\n\n field_types = [i[0] for i in CustomProfileField.FIELD_TYPE_CHOICES]\n if field_type not in field_types:\n raise JsonableError(_(\"Invalid field type.\"))\n\n validate_display_in_profile_summary_field(field_type, display_in_profile_summary)\n\n\ncheck_profile_field_data: Validator[ProfileFieldData] = check_dict(\n value_validator=check_union([check_dict(value_validator=check_string), check_string])\n)\n\n\ndef update_only_display_in_profile_summary(\n requested_name: str,\n requested_hint: str,\n requested_field_data: ProfileFieldData,\n existing_field: CustomProfileField,\n) -> bool:\n if (\n requested_name != existing_field.name\n or requested_hint != existing_field.hint\n or requested_field_data != orjson.loads(existing_field.field_data)\n ):\n return False\n return True\n\n\ndef display_in_profile_summary_limit_reached(profile_field_id: Optional[int] = None) -> bool:\n query = CustomProfileField.objects.filter(display_in_profile_summary=True)\n if profile_field_id is not None:\n query = query.exclude(id=profile_field_id)\n return query.count() >= CustomProfileField.MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS\n\n\n@require_realm_admin\n@has_request_variables\ndef create_realm_custom_profile_field(\n request: HttpRequest,\n user_profile: UserProfile,\n name: str = REQ(default=\"\", converter=lambda var_name, x: x.strip()),\n hint: str = REQ(default=\"\"),\n field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),\n field_type: int = REQ(json_validator=check_int),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n) -> HttpResponse:\n if display_in_profile_summary and display_in_profile_summary_limit_reached():\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n\n validate_custom_profile_field(name, hint, field_type, field_data, display_in_profile_summary)\n try:\n if is_default_external_field(field_type, field_data):\n field_subtype = field_data[\"subtype\"]\n assert isinstance(field_subtype, str)\n field = try_add_realm_default_custom_profile_field(\n realm=user_profile.realm,\n field_subtype=field_subtype,\n display_in_profile_summary=display_in_profile_summary,\n )\n return json_success(request, data={\"id\": field.id})\n else:\n field = try_add_realm_custom_profile_field(\n realm=user_profile.realm,\n name=name,\n field_data=field_data,\n field_type=field_type,\n hint=hint,\n display_in_profile_summary=display_in_profile_summary,\n )\n return 
json_success(request, data={\"id\": field.id})\n except IntegrityError:\n raise JsonableError(_(\"A field with that label already exists.\"))\n\n\n@require_realm_admin\ndef delete_realm_custom_profile_field(\n request: HttpRequest, user_profile: UserProfile, field_id: int\n) -> HttpResponse:\n try:\n field = CustomProfileField.objects.get(id=field_id)\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n\n do_remove_realm_custom_profile_field(realm=user_profile.realm, field=field)\n return json_success(request)\n\n\n@require_realm_admin\n@has_request_variables\ndef update_realm_custom_profile_field(\n request: HttpRequest,\n user_profile: UserProfile,\n field_id: int,\n name: str = REQ(default=\"\", converter=lambda var_name, x: x.strip()),\n hint: str = REQ(default=\"\"),\n field_data: ProfileFieldData = REQ(default={}, json_validator=check_profile_field_data),\n display_in_profile_summary: bool = REQ(default=False, json_validator=check_bool),\n) -> HttpResponse:\n realm = user_profile.realm\n try:\n field = CustomProfileField.objects.get(realm=realm, id=field_id)\n except CustomProfileField.DoesNotExist:\n raise JsonableError(_(\"Field id {id} not found.\").format(id=field_id))\n\n if display_in_profile_summary and display_in_profile_summary_limit_reached(field.id):\n raise JsonableError(\n _(\"Only 2 custom profile fields can be displayed in the profile summary.\")\n )\n\n if field.field_type == CustomProfileField.EXTERNAL_ACCOUNT:\n # HACK: Allow changing the display_in_profile_summary property\n # of default external account types, but not any others.\n #\n # TODO: Make the name/hint/field_data parameters optional, and\n # just require that None was passed for all of them for this case.\n if is_default_external_field(\n field.field_type, orjson.loads(field.field_data)\n ) and not update_only_display_in_profile_summary(name, hint, field_data, field):\n raise JsonableError(_(\"Default custom field cannot be updated.\"))\n\n validate_custom_profile_field(\n name, hint, field.field_type, field_data, display_in_profile_summary\n )\n try:\n try_update_realm_custom_profile_field(\n realm,\n field,\n name,\n hint=hint,\n field_data=field_data,\n display_in_profile_summary=display_in_profile_summary,\n )\n except IntegrityError:\n raise JsonableError(_(\"A field with that label already exists.\"))\n return json_success(request)\n\n\n@require_realm_admin\n@has_request_variables\ndef reorder_realm_custom_profile_fields(\n request: HttpRequest,\n user_profile: UserProfile,\n order: List[int] = REQ(json_validator=check_list(check_int)),\n) -> HttpResponse:\n try_reorder_realm_custom_profile_fields(user_profile.realm, order)\n return json_success(request)\n\n\n@human_users_only\n@has_request_variables\ndef remove_user_custom_profile_data(\n request: HttpRequest,\n user_profile: UserProfile,\n data: List[int] = REQ(json_validator=check_list(check_int)),\n) -> HttpResponse:\n for field_id in data:\n check_remove_custom_profile_field_value(user_profile, field_id)\n return json_success(request)\n\n\ncheck_profile_data_element_update_dict = cast(\n Validator[ProfileDataElementUpdateDict],\n check_dict_only(\n [\n (\"id\", check_int),\n (\"value\", check_union([check_string, check_list(check_int)])),\n ]\n ),\n)\n\n\n@human_users_only\n@has_request_variables\ndef update_user_custom_profile_data(\n request: HttpRequest,\n user_profile: UserProfile,\n data: List[ProfileDataElementUpdateDict] = REQ(\n json_validator=check_list(\n 
check_profile_data_element_update_dict,\n )\n ),\n) -> HttpResponse:\n\n validate_user_custom_profile_data(user_profile.realm.id, data)\n do_update_user_custom_profile_data_if_changed(user_profile, data)\n # We need to call this explicitly otherwise constraints are not check\n return json_success(request)\n", "path": "zerver/views/custom_profile_fields.py"}]} | 3,841 | 474 |
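As a plain-Python illustration of the realm-scoping change in the patch above (the rows below are hypothetical stand-ins for CustomProfileField objects, not Zulip code):

```python
# Sketch: counting summary fields per realm instead of globally.
MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS = 2

# (realm_id, display_in_profile_summary) pairs standing in for CustomProfileField rows.
fields = [(1, True), (1, True), (2, False), (2, False)]

def limit_reached_globally():
    # Old behaviour: realm 2 is blocked because realm 1 already shows two fields.
    return sum(1 for _, shown in fields if shown) >= MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS

def limit_reached(realm_id):
    # Patched behaviour: only fields belonging to the given realm are counted.
    shown = sum(1 for rid, s in fields if rid == realm_id and s)
    return shown >= MAX_DISPLAY_IN_PROFILE_SUMMARY_FIELDS

print(limit_reached_globally())  # True  (spurious "Only 2 custom profile fields..." error)
print(limit_reached(2))          # False (realm 2 may still enable summary fields)
```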
gh_patches_debug_13361 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5230 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MacOS onefile package, GUI does not display in High Resolution mode by default. Now it's 2019, so I suggest make the 'NSHighResolutionCapable' option a default.
Hi dear developers,
I found a small issue on mac os x, after running the command 'pyinstaller --onefile --noconsole myscript.py', there are 2 generated files, one is a Unix executable, the other one is a mac os APP PACKAGE.
My program uses Tkinter for GUI, When the program is launched from the unix executable, it displays in the high resolution mode like a normal mac os app. But when launched from the APP Package, it will display in a low resolution mode, which looks really strange on today's Mac.
After a little search, I found this topic in Stackoverflow, https://stackoverflow.com/questions/40484659/pyinstaller-tkinter-window-low-resolution-in-app-bundle-but-not-in-app-program , added some configs as this topic suggests:
app = BUNDLE(exe,
name='myscript.app',
icon=None,
bundle_identifier=None
**info_plist={
'NSHighResolutionCapable': 'True'
},**
)
and my program launched from package file now just displayed fine.
So it's all about the **NSHighResolutionCapable** option is disabled or not set by default.
Though my problem is solved, the reason why I still want to open this Issue is, **now it's 2019, in the MacWorld, Retina or High Resolution is already the mainstream,** almost all the Mac products are equipped with a Retina display, and people buy mac for its crisp display.
Even if you are using an external display, now the 4k is the mainstream, there's no reason to hook a low resolution display especially on a Mac except you already have one and want to still use it.
So I suggest, make this option a DEFAULT in Mac OS version, since the command line executable displays in a high res mode, there's no reason why the App Package not. And high resolution is always what most folks expected.
Make high resolution a default, no more tweaks, no more searches.
Consider this!
</issue>
<code>
[start of PyInstaller/building/osx.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import os
13 import plistlib
14 import shutil
15 from ..compat import is_darwin
16 from .api import EXE, COLLECT
17 from .datastruct import Target, TOC, logger
18 from .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache
19
20
21
22 class BUNDLE(Target):
23 def __init__(self, *args, **kws):
24 from ..config import CONF
25
26 # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms
27 if not is_darwin:
28 return
29
30 # get a path to a .icns icon for the app bundle.
31 self.icon = kws.get('icon')
32 if not self.icon:
33 # --icon not specified; use the default in the pyinstaller folder
34 self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),
35 'bootloader', 'images', 'icon-windowed.icns')
36 else:
37 # user gave an --icon=path. If it is relative, make it
38 # relative to the spec file location.
39 if not os.path.isabs(self.icon):
40 self.icon = os.path.join(CONF['specpath'], self.icon)
41 # ensure icon path is absolute
42 self.icon = os.path.abspath(self.icon)
43
44 Target.__init__(self)
45
46 # .app bundle is created in DISTPATH.
47 self.name = kws.get('name', None)
48 base_name = os.path.basename(self.name)
49 self.name = os.path.join(CONF['distpath'], base_name)
50
51 self.appname = os.path.splitext(base_name)[0]
52 self.version = kws.get("version", "0.0.0")
53 self.toc = TOC()
54 self.strip = False
55 self.upx = False
56 self.console = True
57
58 # .app bundle identifier for Code Signing
59 self.bundle_identifier = kws.get('bundle_identifier')
60 if not self.bundle_identifier:
61 # Fallback to appname.
62 self.bundle_identifier = self.appname
63
64 self.info_plist = kws.get('info_plist', None)
65
66 for arg in args:
67 if isinstance(arg, EXE):
68 self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
69 self.toc.extend(arg.dependencies)
70 self.strip = arg.strip
71 self.upx = arg.upx
72 self.upx_exclude = arg.upx_exclude
73 self.console = arg.console
74 elif isinstance(arg, TOC):
75 self.toc.extend(arg)
76 # TOC doesn't have a strip or upx attribute, so there is no way for us to
77 # tell which cache we should draw from.
78 elif isinstance(arg, COLLECT):
79 self.toc.extend(arg.toc)
80 self.strip = arg.strip_binaries
81 self.upx = arg.upx_binaries
82 self.upx_exclude = arg.upx_exclude
83 self.console = arg.console
84 else:
85 logger.info("unsupported entry %s", arg.__class__.__name__)
86 # Now, find values for app filepath (name), app name (appname), and name
87 # of the actual executable (exename) from the first EXECUTABLE item in
88 # toc, which might have come from a COLLECT too (not from an EXE).
89 for inm, name, typ in self.toc:
90 if typ == "EXECUTABLE":
91 self.exename = name
92 if self.name is None:
93 self.appname = "Mac%s" % (os.path.splitext(inm)[0],)
94 self.name = os.path.join(CONF['specpath'], self.appname + ".app")
95 else:
96 self.name = os.path.join(CONF['specpath'], self.name)
97 break
98 self.__postinit__()
99
100 _GUTS = (
101 # BUNDLE always builds, just want the toc to be written out
102 ('toc', None),
103 )
104
105 def _check_guts(self, data, last_build):
106 # BUNDLE always needs to be executed, since it will clean the output
107 # directory anyway to make sure there is no existing cruft accumulating
108 return 1
109
110 def assemble(self):
111 if _check_path_overlap(self.name) and os.path.isdir(self.name):
112 _rmtree(self.name)
113 logger.info("Building BUNDLE %s", self.tocbasename)
114
115 # Create a minimal Mac bundle structure
116 os.makedirs(os.path.join(self.name, "Contents", "MacOS"))
117 os.makedirs(os.path.join(self.name, "Contents", "Resources"))
118 os.makedirs(os.path.join(self.name, "Contents", "Frameworks"))
119
120 # Copy icns icon to Resources directory.
121 if os.path.exists(self.icon):
122 shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))
123 else:
124 logger.warning("icon not found %s", self.icon)
125
126 # Key/values for a minimal Info.plist file
127 info_plist_dict = {"CFBundleDisplayName": self.appname,
128 "CFBundleName": self.appname,
129
130 # Required by 'codesign' utility.
131 # The value for CFBundleIdentifier is used as the default unique
132 # name of your program for Code Signing purposes.
133 # It even identifies the APP for access to restricted OS X areas
134 # like Keychain.
135 #
136 # The identifier used for signing must be globally unique. The usal
137 # form for this identifier is a hierarchical name in reverse DNS
138 # notation, starting with the toplevel domain, followed by the
139 # company name, followed by the department within the company, and
140 # ending with the product name. Usually in the form:
141 # com.mycompany.department.appname
142 # Cli option --osx-bundle-identifier sets this value.
143 "CFBundleIdentifier": self.bundle_identifier,
144
145 # Fix for #156 - 'MacOS' must be in the name - not sure why
146 "CFBundleExecutable": 'MacOS/%s' % os.path.basename(self.exename),
147 "CFBundleIconFile": os.path.basename(self.icon),
148 "CFBundleInfoDictionaryVersion": "6.0",
149 "CFBundlePackageType": "APPL",
150 "CFBundleShortVersionString": self.version,
151
152 }
153
154 # Setting EXE console=True implies LSBackgroundOnly=True.
155 # But it still can be overwrite by the user.
156 if self.console:
157 info_plist_dict['LSBackgroundOnly'] = True
158
159 # Merge info_plist settings from spec file
160 if isinstance(self.info_plist, dict) and self.info_plist:
161 info_plist_dict.update(self.info_plist)
162
163 plist_filename = os.path.join(self.name, "Contents", "Info.plist")
164 with open(plist_filename, "wb") as plist_fh:
165 plistlib.dump(info_plist_dict, plist_fh)
166
167 links = []
168 toc = add_suffix_to_extensions(self.toc)
169 for inm, fnm, typ in toc:
170 # Copy files from cache. This ensures that are used files with relative
171 # paths to dynamic library dependencies (@executable_path)
172 base_path = inm.split('/', 1)[0]
173 if typ in ('EXTENSION', 'BINARY'):
174 fnm = checkCache(fnm, strip=self.strip, upx=self.upx,
175 upx_exclude=self.upx_exclude, dist_nm=inm)
176 # Add most data files to a list for symlinking later.
177 if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):
178 links.append((inm, fnm))
179 else:
180 tofnm = os.path.join(self.name, "Contents", "MacOS", inm)
181 todir = os.path.dirname(tofnm)
182 if not os.path.exists(todir):
183 os.makedirs(todir)
184 if os.path.isdir(fnm):
185 # beacuse shutil.copy2() is the default copy function
186 # for shutil.copytree, this will also copy file metadata
187 shutil.copytree(fnm, tofnm)
188 else:
189 shutil.copy(fnm, tofnm)
190
191 logger.info('moving BUNDLE data files to Resource directory')
192
193 # Mac OS X Code Signing does not work when .app bundle contains
194 # data files in dir ./Contents/MacOS.
195 #
196 # Put all data files in ./Resources and create symlinks in ./MacOS.
197 bin_dir = os.path.join(self.name, 'Contents', 'MacOS')
198 res_dir = os.path.join(self.name, 'Contents', 'Resources')
199 for inm, fnm in links:
200 tofnm = os.path.join(res_dir, inm)
201 todir = os.path.dirname(tofnm)
202 if not os.path.exists(todir):
203 os.makedirs(todir)
204 if os.path.isdir(fnm):
205 # beacuse shutil.copy2() is the default copy function
206 # for shutil.copytree, this will also copy file metadata
207 shutil.copytree(fnm, tofnm)
208 else:
209 shutil.copy(fnm, tofnm)
210 base_path = os.path.split(inm)[0]
211 if base_path:
212 if not os.path.exists(os.path.join(bin_dir, inm)):
213 path = ''
214 for part in iter(base_path.split(os.path.sep)):
215 # Build path from previous path and the next part of the base path
216 path = os.path.join(path, part)
217 try:
218 relative_source_path = os.path.relpath(os.path.join(res_dir, path),
219 os.path.split(os.path.join(bin_dir, path))[0])
220 dest_path = os.path.join(bin_dir, path)
221 os.symlink(relative_source_path, dest_path)
222 break
223 except FileExistsError:
224 pass
225 if not os.path.exists(os.path.join(bin_dir, inm)):
226 relative_source_path = os.path.relpath(os.path.join(res_dir, inm),
227 os.path.split(os.path.join(bin_dir, inm))[0])
228 dest_path = os.path.join(bin_dir, inm)
229 os.symlink(relative_source_path, dest_path)
230 else: # If path is empty, e.g., a top level file, try to just symlink the file
231 os.symlink(os.path.relpath(os.path.join(res_dir, inm),
232 os.path.split(os.path.join(bin_dir, inm))[0]),
233 os.path.join(bin_dir, inm))
234
[end of PyInstaller/building/osx.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/building/osx.py b/PyInstaller/building/osx.py
--- a/PyInstaller/building/osx.py
+++ b/PyInstaller/building/osx.py
@@ -151,10 +151,14 @@
}
- # Setting EXE console=True implies LSBackgroundOnly=True.
- # But it still can be overwrite by the user.
+ # Set some default values.
+ # But they still can be overwritten by the user.
if self.console:
+ # Setting EXE console=True implies LSBackgroundOnly=True.
info_plist_dict['LSBackgroundOnly'] = True
+ else:
+ # Let's use high resolution by default.
+ info_plist_dict['NSHighResolutionCapable'] = True
# Merge info_plist settings from spec file
if isinstance(self.info_plist, dict) and self.info_plist:
| {"golden_diff": "diff --git a/PyInstaller/building/osx.py b/PyInstaller/building/osx.py\n--- a/PyInstaller/building/osx.py\n+++ b/PyInstaller/building/osx.py\n@@ -151,10 +151,14 @@\n \n }\n \n- # Setting EXE console=True implies LSBackgroundOnly=True.\n- # But it still can be overwrite by the user.\n+ # Set some default values.\n+ # But they still can be overwritten by the user.\n if self.console:\n+ # Setting EXE console=True implies LSBackgroundOnly=True.\n info_plist_dict['LSBackgroundOnly'] = True\n+ else:\n+ # Let's use high resolution by default.\n+ info_plist_dict['NSHighResolutionCapable'] = True\n \n # Merge info_plist settings from spec file\n if isinstance(self.info_plist, dict) and self.info_plist:\n", "issue": "MacOS onefile package, GUI does not display in High Resolution mode by default. Now it's 2019, so I suggest make the 'NSHighResolutionCapable' option a default.\nHi dear developers,\r\n\r\nI found a small issue on mac os x, after running the command 'pyinstaller --onefile --noconsole myscript.py', there are 2 generated files, one is a Unix executable, the other one is a mac os APP PACKAGE. \r\n\r\nMy program uses Tkinter for GUI, When the program is launched from the unix executable, it displays in the high resolution mode like a normal mac os app. But when launched from the APP Package, it will display in a low resolution mode, which looks really strange on today's Mac.\r\n\r\nAfter a little search, I found this topic in Stackoverflow, https://stackoverflow.com/questions/40484659/pyinstaller-tkinter-window-low-resolution-in-app-bundle-but-not-in-app-program , added some configs as this topic suggests:\r\n\r\napp = BUNDLE(exe,\r\n name='myscript.app',\r\n icon=None,\r\n bundle_identifier=None\r\n **info_plist={\r\n 'NSHighResolutionCapable': 'True'\r\n },**\r\n )\r\n\r\nand my program launched from package file now just displayed fine.\r\nSo it's all about the **NSHighResolutionCapable** option is disabled or not set by default.\r\n\r\n\r\nThough my problem is solved, the reason why I still want to open this Issue is, **now it's 2019, in the MacWorld, Retina or High Resolution is already the mainstream,** almost all the Mac products are equipped with a Retina display, and people buy mac for its crisp display. \r\nEven if you are using an external display, now the 4k is the mainstream, there's no reason to hook a low resolution display especially on a Mac except you already have one and want to still use it.\r\n\r\nSo I suggest, make this option a DEFAULT in Mac OS version, since the command line executable displays in a high res mode, there's no reason why the App Package not. And high resolution is always what most folks expected. 
\r\n\r\nMake high resolution a default, no more tweaks, no more searches.\r\n\r\nConsider this!\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport os\nimport plistlib\nimport shutil\nfrom ..compat import is_darwin\nfrom .api import EXE, COLLECT\nfrom .datastruct import Target, TOC, logger\nfrom .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache\n\n\n\nclass BUNDLE(Target):\n def __init__(self, *args, **kws):\n from ..config import CONF\n\n # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms\n if not is_darwin:\n return\n\n # get a path to a .icns icon for the app bundle.\n self.icon = kws.get('icon')\n if not self.icon:\n # --icon not specified; use the default in the pyinstaller folder\n self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'bootloader', 'images', 'icon-windowed.icns')\n else:\n # user gave an --icon=path. If it is relative, make it\n # relative to the spec file location.\n if not os.path.isabs(self.icon):\n self.icon = os.path.join(CONF['specpath'], self.icon)\n # ensure icon path is absolute\n self.icon = os.path.abspath(self.icon)\n\n Target.__init__(self)\n\n # .app bundle is created in DISTPATH.\n self.name = kws.get('name', None)\n base_name = os.path.basename(self.name)\n self.name = os.path.join(CONF['distpath'], base_name)\n\n self.appname = os.path.splitext(base_name)[0]\n self.version = kws.get(\"version\", \"0.0.0\")\n self.toc = TOC()\n self.strip = False\n self.upx = False\n self.console = True\n\n # .app bundle identifier for Code Signing\n self.bundle_identifier = kws.get('bundle_identifier')\n if not self.bundle_identifier:\n # Fallback to appname.\n self.bundle_identifier = self.appname\n\n self.info_plist = kws.get('info_plist', None)\n\n for arg in args:\n if isinstance(arg, EXE):\n self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))\n self.toc.extend(arg.dependencies)\n self.strip = arg.strip\n self.upx = arg.upx\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n elif isinstance(arg, TOC):\n self.toc.extend(arg)\n # TOC doesn't have a strip or upx attribute, so there is no way for us to\n # tell which cache we should draw from.\n elif isinstance(arg, COLLECT):\n self.toc.extend(arg.toc)\n self.strip = arg.strip_binaries\n self.upx = arg.upx_binaries\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n else:\n logger.info(\"unsupported entry %s\", arg.__class__.__name__)\n # Now, find values for app filepath (name), app name (appname), and name\n # of the actual executable (exename) from the first EXECUTABLE item in\n # toc, which might have come from a COLLECT too (not from an EXE).\n for inm, name, typ in self.toc:\n if typ == \"EXECUTABLE\":\n self.exename = name\n if self.name is None:\n self.appname = \"Mac%s\" % (os.path.splitext(inm)[0],)\n self.name = os.path.join(CONF['specpath'], self.appname + \".app\")\n else:\n self.name = os.path.join(CONF['specpath'], self.name)\n break\n self.__postinit__()\n\n _GUTS = (\n # BUNDLE always 
builds, just want the toc to be written out\n ('toc', None),\n )\n\n def _check_guts(self, data, last_build):\n # BUNDLE always needs to be executed, since it will clean the output\n # directory anyway to make sure there is no existing cruft accumulating\n return 1\n\n def assemble(self):\n if _check_path_overlap(self.name) and os.path.isdir(self.name):\n _rmtree(self.name)\n logger.info(\"Building BUNDLE %s\", self.tocbasename)\n\n # Create a minimal Mac bundle structure\n os.makedirs(os.path.join(self.name, \"Contents\", \"MacOS\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Resources\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Frameworks\"))\n\n # Copy icns icon to Resources directory.\n if os.path.exists(self.icon):\n shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))\n else:\n logger.warning(\"icon not found %s\", self.icon)\n\n # Key/values for a minimal Info.plist file\n info_plist_dict = {\"CFBundleDisplayName\": self.appname,\n \"CFBundleName\": self.appname,\n\n # Required by 'codesign' utility.\n # The value for CFBundleIdentifier is used as the default unique\n # name of your program for Code Signing purposes.\n # It even identifies the APP for access to restricted OS X areas\n # like Keychain.\n #\n # The identifier used for signing must be globally unique. The usal\n # form for this identifier is a hierarchical name in reverse DNS\n # notation, starting with the toplevel domain, followed by the\n # company name, followed by the department within the company, and\n # ending with the product name. Usually in the form:\n # com.mycompany.department.appname\n # Cli option --osx-bundle-identifier sets this value.\n \"CFBundleIdentifier\": self.bundle_identifier,\n\n # Fix for #156 - 'MacOS' must be in the name - not sure why\n \"CFBundleExecutable\": 'MacOS/%s' % os.path.basename(self.exename),\n \"CFBundleIconFile\": os.path.basename(self.icon),\n \"CFBundleInfoDictionaryVersion\": \"6.0\",\n \"CFBundlePackageType\": \"APPL\",\n \"CFBundleShortVersionString\": self.version,\n\n }\n\n # Setting EXE console=True implies LSBackgroundOnly=True.\n # But it still can be overwrite by the user.\n if self.console:\n info_plist_dict['LSBackgroundOnly'] = True\n\n # Merge info_plist settings from spec file\n if isinstance(self.info_plist, dict) and self.info_plist:\n info_plist_dict.update(self.info_plist)\n\n plist_filename = os.path.join(self.name, \"Contents\", \"Info.plist\")\n with open(plist_filename, \"wb\") as plist_fh:\n plistlib.dump(info_plist_dict, plist_fh)\n\n links = []\n toc = add_suffix_to_extensions(self.toc)\n for inm, fnm, typ in toc:\n # Copy files from cache. 
This ensures that are used files with relative\n # paths to dynamic library dependencies (@executable_path)\n base_path = inm.split('/', 1)[0]\n if typ in ('EXTENSION', 'BINARY'):\n fnm = checkCache(fnm, strip=self.strip, upx=self.upx,\n upx_exclude=self.upx_exclude, dist_nm=inm)\n # Add most data files to a list for symlinking later.\n if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):\n links.append((inm, fnm))\n else:\n tofnm = os.path.join(self.name, \"Contents\", \"MacOS\", inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n\n logger.info('moving BUNDLE data files to Resource directory')\n\n # Mac OS X Code Signing does not work when .app bundle contains\n # data files in dir ./Contents/MacOS.\n #\n # Put all data files in ./Resources and create symlinks in ./MacOS.\n bin_dir = os.path.join(self.name, 'Contents', 'MacOS')\n res_dir = os.path.join(self.name, 'Contents', 'Resources')\n for inm, fnm in links:\n tofnm = os.path.join(res_dir, inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n base_path = os.path.split(inm)[0]\n if base_path:\n if not os.path.exists(os.path.join(bin_dir, inm)):\n path = ''\n for part in iter(base_path.split(os.path.sep)):\n # Build path from previous path and the next part of the base path\n path = os.path.join(path, part)\n try:\n relative_source_path = os.path.relpath(os.path.join(res_dir, path),\n os.path.split(os.path.join(bin_dir, path))[0])\n dest_path = os.path.join(bin_dir, path)\n os.symlink(relative_source_path, dest_path)\n break\n except FileExistsError:\n pass\n if not os.path.exists(os.path.join(bin_dir, inm)):\n relative_source_path = os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, inm))[0])\n dest_path = os.path.join(bin_dir, inm)\n os.symlink(relative_source_path, dest_path)\n else: # If path is empty, e.g., a top level file, try to just symlink the file\n os.symlink(os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, inm))[0]),\n os.path.join(bin_dir, inm))\n", "path": "PyInstaller/building/osx.py"}]} | 3,942 | 202 |
gh_patches_debug_17879 | rasdani/github-patches | git_diff | conan-io__conan-center-index-14218 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] qwt/6.2.0: Unresolvable conflict between requires and build_requires
### Description
Using https://github.com/EstebanDugueperoux2/qwtExample example, today I get following error:
```
WARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q
WARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q
ERROR: Conflict in qt/5.15.5:
'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.
To fix this conflict you need to override the package 'openssl' in your root package.
```
Even by overriding openssl dependency because it is a conflict between a require and a build_require.
See https://github.com/conan-io/conan/issues/4753.
### Package and Environment Details
* Package Name/Version: **qwt/6.2.0**
* Operating System+version: **conanio/gcc8**
* Compiler+version: **GCC 8**
* Docker image: **conanio/gcc8**
* Conan version: **conan 1.54.0**
* Python version: **Python 3.7.13**
### Conan profile
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=8
compiler.libcxx=libstdc++11
build_type=Release
# tests can be linked with gtest libraries only if using same build_type,
# see https://stackoverflow.com/questions/41408216/gtest-installed-with-conan-undefined-reference-to-testinginternalgetboolas/41409311#41409311
gtest:build_type=Debug
[options]
[build_requires]
[env]
### Steps to reproduce
docker run --rm -ti -v ${PWD}:/home/conan/project conanio/gcc8
cd project
export OPTIONS="--profile:build .conan/profiles/gcc8 --profile:host .conan/profiles/gcc8 -s qwt_example:build_type=Debug"
export PACKAGE_VERSION=`conan inspect . --raw version`
conan install . --install-folder=build --build missing $OPTIONS
### Logs
<details><summary>Click to expand log</summary>
```
WARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q
WARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q
ERROR: Conflict in qt/5.15.5:
'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.
To fix this conflict you need to override the package 'openssl' in your root package.
```
</details>
</issue>
<code>
[start of recipes/qwt/all/conanfile.py]
1 from conan import ConanFile
2 from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
3 from conan.tools.files import apply_conandata_patches, copy, get, rmdir
4 from conan.tools.build import cross_building
5 from conan.errors import ConanInvalidConfiguration
6 import os
7
8 required_conan_version = ">=1.50"
9
10 class QwtConan(ConanFile):
11 name = "qwt"
12 license = "LGPL-2.1-or-later"
13 url = "https://github.com/conan-io/conan-center-index"
14 homepage = "https://qwt.sourceforge.io/"
15 topics = ("conan", "archive", "compression")
16 description = (
17 "The Qwt library contains GUI Components and utility classes which are primarily useful for programs "
18 "with a technical background. Beside a framework for 2D plots it provides scales, sliders, dials, compasses, "
19 "thermometers, wheels and knobs to control or display values, arrays, or ranges of type double."
20 )
21 settings = "os", "compiler", "build_type", "arch"
22 options = {
23 "shared": [True, False],
24 "fPIC": [True, False],
25 "plot": [True, False],
26 "widgets": [True, False],
27 "svg": [True, False],
28 "opengl": [True, False],
29 "designer": [True, False],
30 "polar": [True, False],
31 "playground": [True, False],
32 "examples": [True, False],
33 "test": [True, False],
34 }
35 default_options = {
36 "shared": False,
37 "fPIC": True,
38 "plot": True,
39 "widgets": True,
40 "svg": False,
41 "opengl": True,
42 "designer": False,
43 "polar": True,
44 "playground": False,
45 "examples": False,
46 "test": False
47 }
48
49 tool_requires = (
50 "cmake/3.23.2",
51 "ninja/1.11.0"
52 )
53
54 def _patch_sources(self):
55 apply_conandata_patches(self)
56
57 def export_sources(self):
58 for p in self.conan_data.get("patches", {}).get(self.version, []):
59 copy(self, p["patch_file"], self.recipe_folder, self.export_sources_folder)
60
61
62 def requirements(self):
63 self.requires("qt/5.15.5")
64
65 def build_requirements(self):
66 if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio":
67 self.build_requires("jom/1.1.3")
68 self.tool_requires("qt/5.15.5")
69
70 def validate(self):
71 if hasattr(self, "settings_build") and cross_building(self, skip_x64_x86=True):
72 raise ConanInvalidConfiguration("Qwt recipe does not support cross-compilation yet")
73
74 def config_options(self):
75 if self.settings.os == "Windows":
76 del self.options.fPIC
77
78 def configure(self):
79 if self.options.shared:
80 del self.options.fPIC
81
82 def source(self):
83 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
84
85 def layout(self):
86 cmake_layout(self)
87
88 def generate(self):
89 tc = CMakeToolchain(self, generator="Ninja")
90
91 tc.variables["QWT_DLL"] = "ON" if self.options.shared else "OFF"
92 tc.variables["QWT_STATIC "] = "ON" if not self.options.shared else "OFF"
93 tc.variables["QWT_PLOT"] = "ON" if self.options.plot else "OFF"
94 tc.variables["QWT_WIDGETS"] = "ON" if self.options.widgets else "OFF"
95 tc.variables["QWT_SVG"] = "ON" if self.options.svg else "OFF"
96 tc.variables["QWT_OPENGL"] = "ON" if self.options.opengl else "OFF"
97 tc.variables["QWT_DESIGNER"] = "ON" if self.options.designer else "OFF"
98 tc.variables["QWT_POLAR"] = "ON" if self.options.polar else "OFF"
99 tc.variables["QWT_BUILD_PLAYGROUND"] = "ON" if self.options.playground else "OFF"
100 tc.variables["QWT_BUILD_EXAMPLES"] = "ON" if self.options.examples else "OFF"
101 tc.variables["QWT_BUILD_TESTS"] = "ON" if self.options.test else "OFF"
102 tc.variables["QWT_FRAMEWORK"] = "OFF"
103
104 tc.generate()
105
106 deps = CMakeDeps(self)
107 deps.generate()
108
109 def build(self):
110 self._patch_sources()
111 cmake = CMake(self)
112 cmake.configure()
113 cmake.build()
114
115 if self.options.test:
116 cmake.test()
117
118 def package(self):
119 cmake = CMake(self)
120 cmake.install()
121 rmdir(self, f"{self.package_folder}/lib/pkgconfig")
122 rmdir(self, f"{self.package_folder}/lib/cmake")
123 self.copy("COPYING", src=self.folders.source, dst="licenses")
124
125 def package_info(self):
126 self.cpp_info.libs = ["qwt"]
127 self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'bin'))
128 self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'lib'))
129 self.cpp_info.defines = ['HAVE_QWT', 'QWT_DLL'] if self.options.shared else ['HAVE_QWT']
130 if not self.options.plot:
131 self.cpp_info.defines.append("NO_QWT_PLOT")
132 if not self.options.polar:
133 self.cpp_info.defines.append("NO_QWT_POLAR")
134 if not self.options.widgets:
135 self.cpp_info.defines.append("NO_QWT_WIDGETS")
136 if not self.options.opengl:
137 self.cpp_info.defines.append("NO_QWT_OPENGL")
138 if not self.options.svg:
139 self.cpp_info.defines.append("QWT_NO_SVG")
140
141
[end of recipes/qwt/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/qwt/all/conanfile.py b/recipes/qwt/all/conanfile.py
--- a/recipes/qwt/all/conanfile.py
+++ b/recipes/qwt/all/conanfile.py
@@ -47,8 +47,8 @@
}
tool_requires = (
- "cmake/3.23.2",
- "ninja/1.11.0"
+ "cmake/3.24.2",
+ "ninja/1.11.1"
)
def _patch_sources(self):
@@ -60,12 +60,12 @@
def requirements(self):
- self.requires("qt/5.15.5")
+ self.requires("qt/5.15.7")
def build_requirements(self):
if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio":
self.build_requires("jom/1.1.3")
- self.tool_requires("qt/5.15.5")
+ self.tool_requires("qt/5.15.7")
def validate(self):
if hasattr(self, "settings_build") and cross_building(self, skip_x64_x86=True):
| {"golden_diff": "diff --git a/recipes/qwt/all/conanfile.py b/recipes/qwt/all/conanfile.py\n--- a/recipes/qwt/all/conanfile.py\n+++ b/recipes/qwt/all/conanfile.py\n@@ -47,8 +47,8 @@\n }\n \n tool_requires = (\n- \"cmake/3.23.2\",\n- \"ninja/1.11.0\"\n+ \"cmake/3.24.2\",\n+ \"ninja/1.11.1\"\n )\n \n def _patch_sources(self):\n@@ -60,12 +60,12 @@\n \n \n def requirements(self):\n- self.requires(\"qt/5.15.5\")\n+ self.requires(\"qt/5.15.7\")\n \n def build_requirements(self):\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"jom/1.1.3\")\n- self.tool_requires(\"qt/5.15.5\") \n+ self.tool_requires(\"qt/5.15.7\") \n \n def validate(self):\n if hasattr(self, \"settings_build\") and cross_building(self, skip_x64_x86=True):\n", "issue": "[package] qwt/6.2.0: Unresolvable conflict between requires and build_requires\n### Description\n\nUsing https://github.com/EstebanDugueperoux2/qwtExample example, today I get following error:\r\n\r\n```\r\nWARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q \r\nWARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q \r\nERROR: Conflict in qt/5.15.5:\r\n 'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.\r\n To fix this conflict you need to override the package 'openssl' in your root package.\r\n```\r\n\r\nEven by overriding openssl dependency because it is a conflict between a require and a build_require.\r\nSee https://github.com/conan-io/conan/issues/4753.\r\n\n\n### Package and Environment Details\n\n* Package Name/Version: **qwt/6.2.0**\r\n* Operating System+version: **conanio/gcc8**\r\n* Compiler+version: **GCC 8**\r\n* Docker image: **conanio/gcc8**\r\n* Conan version: **conan 1.54.0**\r\n* Python version: **Python 3.7.13**\r\n\n\n### Conan profile\n\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=8\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n# tests can be linked with gtest libraries only if using same build_type,\r\n# see https://stackoverflow.com/questions/41408216/gtest-installed-with-conan-undefined-reference-to-testinginternalgetboolas/41409311#41409311\r\ngtest:build_type=Debug\r\n[options]\r\n[build_requires]\r\n[env]\r\n\n\n### Steps to reproduce\n\ndocker run --rm -ti -v ${PWD}:/home/conan/project conanio/gcc8\r\ncd project\r\n\r\nexport OPTIONS=\"--profile:build .conan/profiles/gcc8 --profile:host .conan/profiles/gcc8 -s qwt_example:build_type=Debug\"\r\nexport PACKAGE_VERSION=`conan inspect . --raw version`\r\nconan install . 
--install-folder=build --build missing $OPTIONS\n\n### Logs\n\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nWARN: qt/5.15.5: requirement openssl/1.1.1s overridden by qwt/6.2.0 to openssl/1.1.1q \r\nWARN: libmysqlclient/8.0.30: requirement openssl/1.1.1s overridden by qt/5.15.5 to openssl/1.1.1q \r\nERROR: Conflict in qt/5.15.5:\r\n 'qt/5.15.5' requires 'openssl/1.1.1s' while 'cmake/3.23.2' requires 'openssl/1.1.1q'.\r\n To fix this conflict you need to override the package 'openssl' in your root package.\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout\nfrom conan.tools.files import apply_conandata_patches, copy, get, rmdir\nfrom conan.tools.build import cross_building\nfrom conan.errors import ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.50\"\n\nclass QwtConan(ConanFile):\n name = \"qwt\"\n license = \"LGPL-2.1-or-later\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://qwt.sourceforge.io/\"\n topics = (\"conan\", \"archive\", \"compression\")\n description = (\n \"The Qwt library contains GUI Components and utility classes which are primarily useful for programs \"\n \"with a technical background. Beside a framework for 2D plots it provides scales, sliders, dials, compasses, \"\n \"thermometers, wheels and knobs to control or display values, arrays, or ranges of type double.\"\n )\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"plot\": [True, False],\n \"widgets\": [True, False],\n \"svg\": [True, False],\n \"opengl\": [True, False],\n \"designer\": [True, False],\n \"polar\": [True, False],\n \"playground\": [True, False],\n \"examples\": [True, False],\n \"test\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"plot\": True,\n \"widgets\": True,\n \"svg\": False,\n \"opengl\": True,\n \"designer\": False,\n \"polar\": True,\n \"playground\": False,\n \"examples\": False,\n \"test\": False\n }\n\n tool_requires = (\n \"cmake/3.23.2\",\n \"ninja/1.11.0\"\n )\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n\n def export_sources(self):\n for p in self.conan_data.get(\"patches\", {}).get(self.version, []):\n copy(self, p[\"patch_file\"], self.recipe_folder, self.export_sources_folder)\n\n\n def requirements(self):\n self.requires(\"qt/5.15.5\")\n\n def build_requirements(self):\n if self.settings.os == \"Windows\" and self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"jom/1.1.3\")\n self.tool_requires(\"qt/5.15.5\") \n\n def validate(self):\n if hasattr(self, \"settings_build\") and cross_building(self, skip_x64_x86=True):\n raise ConanInvalidConfiguration(\"Qwt recipe does not support cross-compilation yet\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def layout(self):\n cmake_layout(self)\n\n def generate(self):\n tc = CMakeToolchain(self, generator=\"Ninja\")\n\n tc.variables[\"QWT_DLL\"] = \"ON\" if self.options.shared else \"OFF\"\n tc.variables[\"QWT_STATIC \"] = \"ON\" if not self.options.shared else \"OFF\"\n tc.variables[\"QWT_PLOT\"] = \"ON\" if self.options.plot else \"OFF\"\n 
tc.variables[\"QWT_WIDGETS\"] = \"ON\" if self.options.widgets else \"OFF\"\n tc.variables[\"QWT_SVG\"] = \"ON\" if self.options.svg else \"OFF\"\n tc.variables[\"QWT_OPENGL\"] = \"ON\" if self.options.opengl else \"OFF\"\n tc.variables[\"QWT_DESIGNER\"] = \"ON\" if self.options.designer else \"OFF\"\n tc.variables[\"QWT_POLAR\"] = \"ON\" if self.options.polar else \"OFF\"\n tc.variables[\"QWT_BUILD_PLAYGROUND\"] = \"ON\" if self.options.playground else \"OFF\"\n tc.variables[\"QWT_BUILD_EXAMPLES\"] = \"ON\" if self.options.examples else \"OFF\"\n tc.variables[\"QWT_BUILD_TESTS\"] = \"ON\" if self.options.test else \"OFF\"\n tc.variables[\"QWT_FRAMEWORK\"] = \"OFF\"\n\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n if self.options.test:\n cmake.test()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n rmdir(self, f\"{self.package_folder}/lib/pkgconfig\") \n rmdir(self, f\"{self.package_folder}/lib/cmake\")\n self.copy(\"COPYING\", src=self.folders.source, dst=\"licenses\")\n\n def package_info(self):\n self.cpp_info.libs = [\"qwt\"]\n self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'bin'))\n self.env_info.QT_PLUGIN_PATH.append(os.path.join(self.package_folder, 'lib'))\n self.cpp_info.defines = ['HAVE_QWT', 'QWT_DLL'] if self.options.shared else ['HAVE_QWT']\n if not self.options.plot:\n self.cpp_info.defines.append(\"NO_QWT_PLOT\")\n if not self.options.polar:\n self.cpp_info.defines.append(\"NO_QWT_POLAR\")\n if not self.options.widgets:\n self.cpp_info.defines.append(\"NO_QWT_WIDGETS\")\n if not self.options.opengl:\n self.cpp_info.defines.append(\"NO_QWT_OPENGL\")\n if not self.options.svg:\n self.cpp_info.defines.append(\"QWT_NO_SVG\")\n\n", "path": "recipes/qwt/all/conanfile.py"}]} | 2,922 | 277 |
gh_patches_debug_4877 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
updating progress doesn't work
Warning : i see this bug as unlinked with instance, because i saw it on my instance and on bookwyrm.social.
When i go to the book view, i have a block with two progress, but without any print of the pages i've read.

If i want to edit the progress to add pages number read, i have the choice between two dates and the number input. So i fill in the form. with only page number filled.

Then i'm going back to the book view, but with 3 entries instead of two. page number hasn't been saved/printed and it added a new progress instead of editing it.

One problem is you can't save a progress without a comment, but even with that fix, i think progress should indicate the pages read and the pages left to be read. a more "graphic' layout could be very cool ;)
</issue>
<code>
[start of bookwyrm/forms/forms.py]
1 """ using django model forms """
2 from django import forms
3 from django.forms import widgets
4 from django.utils.translation import gettext_lazy as _
5
6 from bookwyrm import models
7 from bookwyrm.models.user import FeedFilterChoices
8 from .custom_form import CustomForm
9
10
11 # pylint: disable=missing-class-docstring
12 class FeedStatusTypesForm(CustomForm):
13 class Meta:
14 model = models.User
15 fields = ["feed_status_types"]
16 help_texts = {f: None for f in fields}
17 widgets = {
18 "feed_status_types": widgets.CheckboxSelectMultiple(
19 choices=FeedFilterChoices,
20 ),
21 }
22
23
24 class ImportForm(forms.Form):
25 csv_file = forms.FileField()
26
27
28 class ShelfForm(CustomForm):
29 class Meta:
30 model = models.Shelf
31 fields = ["user", "name", "privacy", "description"]
32
33
34 class GoalForm(CustomForm):
35 class Meta:
36 model = models.AnnualGoal
37 fields = ["user", "year", "goal", "privacy"]
38
39
40 class ReportForm(CustomForm):
41 class Meta:
42 model = models.Report
43 fields = ["user", "reporter", "status", "links", "note"]
44
45
46 class ReadThroughForm(CustomForm):
47 def clean(self):
48 """make sure the email isn't in use by a registered user"""
49 cleaned_data = super().clean()
50 start_date = cleaned_data.get("start_date")
51 finish_date = cleaned_data.get("finish_date")
52 if start_date and finish_date and start_date > finish_date:
53 self.add_error(
54 "finish_date", _("Reading finish date cannot be before start date.")
55 )
56
57 class Meta:
58 model = models.ReadThrough
59 fields = ["user", "book", "start_date", "finish_date"]
60
[end of bookwyrm/forms/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/forms/forms.py b/bookwyrm/forms/forms.py
--- a/bookwyrm/forms/forms.py
+++ b/bookwyrm/forms/forms.py
@@ -45,7 +45,7 @@
class ReadThroughForm(CustomForm):
def clean(self):
- """make sure the email isn't in use by a registered user"""
+ """don't let readthroughs end before they start"""
cleaned_data = super().clean()
start_date = cleaned_data.get("start_date")
finish_date = cleaned_data.get("finish_date")
| {"golden_diff": "diff --git a/bookwyrm/forms/forms.py b/bookwyrm/forms/forms.py\n--- a/bookwyrm/forms/forms.py\n+++ b/bookwyrm/forms/forms.py\n@@ -45,7 +45,7 @@\n \n class ReadThroughForm(CustomForm):\n def clean(self):\n- \"\"\"make sure the email isn't in use by a registered user\"\"\"\n+ \"\"\"don't let readthroughs end before they start\"\"\"\n cleaned_data = super().clean()\n start_date = cleaned_data.get(\"start_date\")\n finish_date = cleaned_data.get(\"finish_date\")\n", "issue": "updating progress doesn't work\nWarning : i see this bug as unlinked with instance, because i saw it on my instance and on bookwyrm.social. \r\n\r\nWhen i go to the book view, i have a block with two progress, but without any print of the pages i've read. \r\n\r\n\r\nIf i want to edit the progress to add pages number read, i have the choice between two dates and the number input. So i fill in the form. with only page number filled.\r\n\r\n\r\nThen i'm going back to the book view, but with 3 entries instead of two. page number hasn't been saved/printed and it added a new progress instead of editing it.\r\n\r\n\r\nOne problem is you can't save a progress without a comment, but even with that fix, i think progress should indicate the pages read and the pages left to be read. a more \"graphic' layout could be very cool ;)\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nfrom django import forms\nfrom django.forms import widgets\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\nfrom bookwyrm.models.user import FeedFilterChoices\nfrom .custom_form import CustomForm\n\n\n# pylint: disable=missing-class-docstring\nclass FeedStatusTypesForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"feed_status_types\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"feed_status_types\": widgets.CheckboxSelectMultiple(\n choices=FeedFilterChoices,\n ),\n }\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\", \"description\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", \"reporter\", \"status\", \"links\", \"note\"]\n\n\nclass ReadThroughForm(CustomForm):\n def clean(self):\n \"\"\"make sure the email isn't in use by a registered user\"\"\"\n cleaned_data = super().clean()\n start_date = cleaned_data.get(\"start_date\")\n finish_date = cleaned_data.get(\"finish_date\")\n if start_date and finish_date and start_date > finish_date:\n self.add_error(\n \"finish_date\", _(\"Reading finish date cannot be before start date.\")\n )\n\n class Meta:\n model = models.ReadThrough\n fields = [\"user\", \"book\", \"start_date\", \"finish_date\"]\n", "path": "bookwyrm/forms/forms.py"}]} | 1,406 | 122 |
gh_patches_debug_22169 | rasdani/github-patches | git_diff | holoviz__panel-2897 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HoloViews Colorbar not styled for dark themed template
Panel: 0.12.4
The ColorBar is unreadable when using the `FastListTemplate` with the `dark` theme.
I would like this to work for an example I am contributing to the `scikit-image` docs.

```python
import holoviews as hv
import panel as pn
from skimage import data, filters
image = data.coins()
edges = filters.sobel(image)*256
bounds = (-1, -1, 1, 1)
after_img = hv.Image(edges, bounds=bounds).apply.opts(
cmap="binary_r", responsive=True, title="After", active_tools=["box_zoom"], tools=["hover"], colorbar=True
)
layout=pn.panel(after_img, height=800, aspect_ratio=1)
pn.template.FastListTemplate(
site="Panel and Scikit-Image",
main=[layout],
header_background="#292929",
theme="dark",
).servable()
```
## How do I fix this?
I would like to contribute a PR. I assume I would have to update the Bokeh Theme generation here https://github.com/holoviz/panel/blob/396ce2f37473a047e4721a1431e6795374686533/panel/template/fast/theme.py#L62.
I can now see that I need to change to


</issue>
<code>
[start of panel/template/fast/theme.py]
1 """
2 Functionality for styling according to Fast.design
3 """
4 import pathlib
5 import param
6
7 from bokeh.themes import Theme as _BkTheme
8
9 from ..theme import DarkTheme, DefaultTheme
10
11 _ROOT = pathlib.Path(__file__).parent / "css"
12
13 COLLAPSED_SVG_ICON = """
14 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="collapsed-icon">
15 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
16 <path d="M9 5.44446V12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
17 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
18 </svg>
19 """ # noqa
20
21 EXPANDED_SVG_ICON = """
22 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="expanded-icon">
23 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
24 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
25 </svg>
26 """ # noqa
27
28 FONT_URL = "//fonts.googleapis.com/css?family=Open+Sans"
29
30 class FastStyle(param.Parameterized):
31 """
32 The FastStyle class provides the different colors and icons used
33 to style the Fast Templates.
34 """
35
36 background_color = param.String(default="#ffffff")
37 neutral_color = param.String(default="#000000")
38 accent_base_color = param.String(default="#A01346")
39 collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)
40 expanded_icon = param.String(default=EXPANDED_SVG_ICON)
41 color = param.String(default="#00aa41")
42 neutral_fill_card_rest = param.String(default="#F7F7F7")
43 neutral_focus = param.String(default="#888888")
44 neutral_foreground_rest = param.String(default="#2B2B2B")
45
46 header_background = param.String(default="#00aa41")
47 header_neutral_color = param.String(default="#ffffff")
48 header_accent_base_color = param.String(default="#ffffff")
49 header_color = param.String(default="#ffffff")
50 font = param.String(default="Open Sans, sans-serif")
51 font_url = param.String(default=FONT_URL)
52 corner_radius = param.Integer(default=3)
53 shadow = param.Boolean(default=True)
54
55 def create_bokeh_theme(self):
56 """Returns a custom bokeh theme based on the style parameters
57
58 Returns:
59 Dict: A Bokeh Theme
60 """
61
62 return {
63 "attrs": {
64 "Figure": {
65 "background_fill_color": self.background_color,
66 "border_fill_color": self.neutral_fill_card_rest,
67 "border_fill_alpha": 0,
68 "outline_line_color": self.neutral_focus,
69 "outline_line_alpha": 0.5,
70 "outline_line_width": 1,
71 },
72 "Grid": {"grid_line_color": self.neutral_focus, "grid_line_alpha": 0.25},
73 "Axis": {
74 "major_tick_line_alpha": 0.5,
75 "major_tick_line_color": self.neutral_foreground_rest,
76 "minor_tick_line_alpha": 0.25,
77 "minor_tick_line_color": self.neutral_foreground_rest,
78 "axis_line_alpha": 0.1,
79 "axis_line_color": self.neutral_foreground_rest,
80 "major_label_text_color": self.neutral_foreground_rest,
81 "major_label_text_font": self.font,
82 # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed
83 # "major_label_text_font_size": "1.025em",
84 "axis_label_standoff": 10,
85 "axis_label_text_color": self.neutral_foreground_rest,
86 "axis_label_text_font": self.font,
87 "axis_label_text_font_size": "1.25em",
88 "axis_label_text_font_style": "normal",
89 },
90 "Legend": {
91 "spacing": 8,
92 "glyph_width": 15,
93 "label_standoff": 8,
94 "label_text_color": self.neutral_foreground_rest,
95 "label_text_font": self.font,
96 "label_text_font_size": "1.025em",
97 "border_line_alpha": 0.5,
98 "border_line_color": self.neutral_focus,
99 "background_fill_alpha": 0.25,
100 "background_fill_color": self.neutral_fill_card_rest,
101 },
102 "ColorBar": {
103 "title_text_color": self.neutral_foreground_rest,
104 "title_text_font": self.font,
105 "title_text_font_size": "1.025em",
106 "title_text_font_style": "normal",
107 "major_label_text_color": self.neutral_foreground_rest,
108 "major_label_text_font": self.font,
109 "major_label_text_font_size": "1.025em",
110 # "background_fill_color": FAST_DARK_75,
111 "major_tick_line_alpha": 0,
112 "bar_line_alpha": 0,
113 },
114 "Title": {
115 "text_color": self.neutral_foreground_rest,
116 "text_font": self.font,
117 "text_font_size": "1.15em",
118 },
119 }
120 }
121
122
123 DEFAULT_STYLE = FastStyle()
124 DARK_STYLE = FastStyle(
125 accent_base_color="#E1477E",
126 background_color="#181818",
127 color="#ffffff",
128 header_background="#313131",
129 header_color="#ffffff",
130 neutral_fill_card_rest="#212121",
131 neutral_focus="#717171",
132 neutral_foreground_rest="#e5e5e5",
133 shadow = False,
134 )
135
136 class FastDefaultTheme(DefaultTheme):
137
138 base_css = param.Filename(default=_ROOT / 'fast_root_default.css')
139
140 style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)
141
142 __abstract = True
143
144 @property
145 def bokeh_theme(self):
146 return _BkTheme(json=self.style.create_bokeh_theme())
147
148
149 class FastDarkTheme(DarkTheme):
150
151 base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')
152
153 style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)
154
155 __abstract = True
156
157 @property
158 def bokeh_theme(self):
159 return _BkTheme(json=self.style.create_bokeh_theme())
160
[end of panel/template/fast/theme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py
--- a/panel/template/fast/theme.py
+++ b/panel/template/fast/theme.py
@@ -100,6 +100,7 @@
"background_fill_color": self.neutral_fill_card_rest,
},
"ColorBar": {
+ "background_fill_color": self.background_color,
"title_text_color": self.neutral_foreground_rest,
"title_text_font": self.font,
"title_text_font_size": "1.025em",
@@ -107,7 +108,6 @@
"major_label_text_color": self.neutral_foreground_rest,
"major_label_text_font": self.font,
"major_label_text_font_size": "1.025em",
- # "background_fill_color": FAST_DARK_75,
"major_tick_line_alpha": 0,
"bar_line_alpha": 0,
},
| {"golden_diff": "diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py\n--- a/panel/template/fast/theme.py\n+++ b/panel/template/fast/theme.py\n@@ -100,6 +100,7 @@\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n+ \"background_fill_color\": self.background_color,\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n@@ -107,7 +108,6 @@\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n- # \"background_fill_color\": FAST_DARK_75,\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n", "issue": "HoloViews Colorbar not styled for dark themed template\nPanel: 0.12.4\r\n\r\nThe ColorBar is unreadable when using the `FastListTemplate` with the `dark` theme.\r\n\r\nI would like this to work for an example I am contributing to the `scikit-image` docs.\r\n\r\n\r\n\r\n```python\r\nimport holoviews as hv\r\nimport panel as pn\r\nfrom skimage import data, filters\r\n\r\nimage = data.coins()\r\nedges = filters.sobel(image)*256\r\nbounds = (-1, -1, 1, 1)\r\n\r\nafter_img = hv.Image(edges, bounds=bounds).apply.opts(\r\n cmap=\"binary_r\", responsive=True, title=\"After\", active_tools=[\"box_zoom\"], tools=[\"hover\"], colorbar=True\r\n)\r\n\r\nlayout=pn.panel(after_img, height=800, aspect_ratio=1)\r\n\r\npn.template.FastListTemplate(\r\n site=\"Panel and Scikit-Image\",\r\n main=[layout],\r\n header_background=\"#292929\",\r\n theme=\"dark\",\r\n).servable()\r\n```\r\n\r\n## How do I fix this?\r\n\r\nI would like to contribute a PR. I assume I would have to update the Bokeh Theme generation here https://github.com/holoviz/panel/blob/396ce2f37473a047e4721a1431e6795374686533/panel/template/fast/theme.py#L62.\r\n\r\nI can now see that I need to change to\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nFunctionality for styling according to Fast.design\n\"\"\"\nimport pathlib\nimport param\n\nfrom bokeh.themes import Theme as _BkTheme\n\nfrom ..theme import DarkTheme, DefaultTheme\n\n_ROOT = pathlib.Path(__file__).parent / \"css\"\n\nCOLLAPSED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"collapsed-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M9 5.44446V12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nEXPANDED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"expanded-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nFONT_URL = \"//fonts.googleapis.com/css?family=Open+Sans\"\n\nclass FastStyle(param.Parameterized):\n \"\"\"\n The FastStyle class 
provides the different colors and icons used\n to style the Fast Templates.\n \"\"\"\n\n background_color = param.String(default=\"#ffffff\")\n neutral_color = param.String(default=\"#000000\")\n accent_base_color = param.String(default=\"#A01346\")\n collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)\n expanded_icon = param.String(default=EXPANDED_SVG_ICON)\n color = param.String(default=\"#00aa41\")\n neutral_fill_card_rest = param.String(default=\"#F7F7F7\")\n neutral_focus = param.String(default=\"#888888\")\n neutral_foreground_rest = param.String(default=\"#2B2B2B\")\n\n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n header_accent_base_color = param.String(default=\"#ffffff\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n corner_radius = param.Integer(default=3)\n shadow = param.Boolean(default=True)\n\n def create_bokeh_theme(self):\n \"\"\"Returns a custom bokeh theme based on the style parameters\n\n Returns:\n Dict: A Bokeh Theme\n \"\"\"\n\n return {\n \"attrs\": {\n \"Figure\": {\n \"background_fill_color\": self.background_color,\n \"border_fill_color\": self.neutral_fill_card_rest,\n \"border_fill_alpha\": 0,\n \"outline_line_color\": self.neutral_focus,\n \"outline_line_alpha\": 0.5,\n \"outline_line_width\": 1,\n },\n \"Grid\": {\"grid_line_color\": self.neutral_focus, \"grid_line_alpha\": 0.25},\n \"Axis\": {\n \"major_tick_line_alpha\": 0.5,\n \"major_tick_line_color\": self.neutral_foreground_rest,\n \"minor_tick_line_alpha\": 0.25,\n \"minor_tick_line_color\": self.neutral_foreground_rest,\n \"axis_line_alpha\": 0.1,\n \"axis_line_color\": self.neutral_foreground_rest,\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed\n # \"major_label_text_font_size\": \"1.025em\",\n \"axis_label_standoff\": 10,\n \"axis_label_text_color\": self.neutral_foreground_rest,\n \"axis_label_text_font\": self.font,\n \"axis_label_text_font_size\": \"1.25em\",\n \"axis_label_text_font_style\": \"normal\",\n },\n \"Legend\": {\n \"spacing\": 8,\n \"glyph_width\": 15,\n \"label_standoff\": 8,\n \"label_text_color\": self.neutral_foreground_rest,\n \"label_text_font\": self.font,\n \"label_text_font_size\": \"1.025em\",\n \"border_line_alpha\": 0.5,\n \"border_line_color\": self.neutral_focus,\n \"background_fill_alpha\": 0.25,\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n \"title_text_font_style\": \"normal\",\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n # \"background_fill_color\": FAST_DARK_75,\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n \"Title\": {\n \"text_color\": self.neutral_foreground_rest,\n \"text_font\": self.font,\n \"text_font_size\": \"1.15em\",\n },\n }\n }\n\n\nDEFAULT_STYLE = FastStyle()\nDARK_STYLE = FastStyle(\n accent_base_color=\"#E1477E\",\n background_color=\"#181818\",\n color=\"#ffffff\",\n header_background=\"#313131\",\n header_color=\"#ffffff\",\n neutral_fill_card_rest=\"#212121\",\n neutral_focus=\"#717171\",\n 
neutral_foreground_rest=\"#e5e5e5\",\n shadow = False,\n)\n\nclass FastDefaultTheme(DefaultTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_default.css')\n\n style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n\n\nclass FastDarkTheme(DarkTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')\n\n style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n", "path": "panel/template/fast/theme.py"}]} | 3,300 | 209 |
gh_patches_debug_7402 | rasdani/github-patches | git_diff | getsentry__sentry-66877 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect data in events when adding large integers to context data
Sentry is treating big values differently in tags and context causing some values to be in correct after event processing. Below is an example event I sent using the ruby SDK, the same behaviour happens in Python as well:
Script:
```
::Sentry.init do |config|
config.dsn = "_MY_DSN_"
end
Sentry.set_tags('bigNumber': 608548899684111178)
Sentry.configure_scope do |scope|
scope.set_context(
'arguments',
{
name: 'A big value',
age: 608548899684111178,
}
)
end
Sentry.capture_message("le big number test")
```
I expect to see both tag and context showing the same value, but in the context session it is showing as `608548899684111200` This is a common issue with large integer/floating point in Node.
A possible workaround is to add the value as a string when it is added manually, but it is not applicable when the data is automatically added by the SDK.

</issue>
<code>
[start of src/sentry/interfaces/contexts.py]
1 from __future__ import annotations
2
3 import string
4 from typing import ClassVar, TypeVar
5
6 from django.utils.encoding import force_str
7
8 from sentry.interfaces.base import Interface
9 from sentry.utils.json import prune_empty_keys
10 from sentry.utils.safe import get_path
11
12 __all__ = ("Contexts",)
13
14 ContextTypeT = TypeVar("ContextTypeT", bound="ContextType")
15
16 context_types: dict[str, type[ContextType]] = {}
17
18
19 class _IndexFormatter(string.Formatter):
20 def format_field(self, value, format_spec):
21 if not format_spec and isinstance(value, bool):
22 return value and "yes" or "no"
23 return string.Formatter.format_field(self, value, format_spec)
24
25
26 def format_index_expr(format_string, data):
27 return str(_IndexFormatter().vformat(str(format_string), (), data).strip())
28
29
30 def contexttype(cls: type[ContextTypeT]) -> type[ContextTypeT]:
31 context_types[cls.type] = cls
32 return cls
33
34
35 # NOTE: Are you adding a new context? Make sure to also update the
36 # documentation in the sentry develop docs [0]!
37 #
38 # [0]: https://develop.sentry.dev/sdk/event-payloads/contexts
39
40
41 class ContextType:
42 context_to_tag_mapping: ClassVar[dict[str, str]] = {}
43 """
44 This indicates which fields should be promoted into tags during event
45 normalization. (See EventManager)
46
47 The key for each entry is used as the name of the tag suffixed by the
48 "alias" of the context (this is the key of the context in the contexts
49 object, it is NOT the `type` of the context, though they are often the
50 same).
51
52 The value is a format string spec that uses python string.Formatter to
53 interpolate any value from the context object.
54
55 There is one special case:
56
57 - When the key of the mapping is an empty string the tag name will simply be
58 the alias.
59
60 For example if you have a context named "myContext" with the data:
61
62 ```json
63 "myContext": {
64 "some_value": "hello world",
65 "subkey": "whatever",
66 "type": "myContext"
67 }
68 ```
69
70 and you have a context_to_tag_mapping that looks like
71
72 ```python
73 context_to_tag_mapping = {"": "{some_value}", "subkey": "{subkey}"}
74 ```
75
76 Then normalization will result in two tags being promoted:
77
78 - myContext: "hello world"
79 - myContext.subkey: "whatever"
80 """
81
82 type: str
83 """This should match the `type` key in context object"""
84
85 def __init__(self, alias, data):
86 self.alias = alias
87 ctx_data = {}
88 for key, value in data.items():
89 # we use a simple check here, rather than ' in set()' to avoid
90 # issues with maps/lists.
91
92 # Even if the value is an empty string,
93 # we still want to display the info the UI
94 if value is not None:
95 ctx_data[force_str(key)] = value
96 self.data = ctx_data
97
98 def to_json(self):
99 rv = dict(self.data)
100 rv["type"] = self.type
101 return prune_empty_keys(rv)
102
103 @classmethod
104 def values_for_data(cls, data):
105 rv = []
106 for context in (data.get("contexts") or {}).values():
107 if context and context.get("type") == cls.type:
108 rv.append(context)
109 return rv
110
111 @classmethod
112 def primary_value_for_data(cls, data):
113 val = get_path(data, "contexts", cls.type)
114 if val and val.get("type") == cls.type:
115 return val
116
117 rv = cls.values_for_data(data)
118 if len(rv) == 1:
119 return rv[0]
120
121 def iter_tags(self):
122 if self.context_to_tag_mapping:
123 for field, f_string in self.context_to_tag_mapping.items():
124 try:
125 value = format_index_expr(f_string, self.data)
126 except KeyError:
127 continue
128 if value:
129 if not field:
130 yield (self.alias, value)
131 else:
132 yield (f"{self.alias}.{field}", value)
133
134
135 # TODO(dcramer): contexts need to document/describe expected (optional) fields
136 @contexttype
137 class DefaultContextType(ContextType):
138 type = "default"
139
140
141 @contexttype
142 class AppContextType(ContextType):
143 type = "app"
144 context_to_tag_mapping = {"device": "{device_app_hash}"}
145
146
147 @contexttype
148 class DeviceContextType(ContextType):
149 type = "device"
150 context_to_tag_mapping = {"": "{model}", "family": "{family}"}
151 # model_id, arch
152
153
154 @contexttype
155 class RuntimeContextType(ContextType):
156 type = "runtime"
157 context_to_tag_mapping = {"": "{name} {version}", "name": "{name}"}
158
159
160 @contexttype
161 class BrowserContextType(ContextType):
162 type = "browser"
163 context_to_tag_mapping = {"": "{name} {version}", "name": "{name}"}
164 # viewport
165
166
167 @contexttype
168 class OsContextType(ContextType):
169 type = "os"
170 context_to_tag_mapping = {"": "{name} {version}", "name": "{name}", "rooted": "{rooted}"}
171 # build, rooted
172
173
174 @contexttype
175 class GpuContextType(ContextType):
176 type = "gpu"
177 context_to_tag_mapping = {"name": "{name}", "vendor": "{vendor_name}"}
178
179
180 @contexttype
181 class MonitorContextType(ContextType):
182 type = "monitor"
183 context_to_tag_mapping = {"id": "{id}", "slug": "{slug}"}
184
185
186 @contexttype
187 class TraceContextType(ContextType):
188 type = "trace"
189 context_to_tag_mapping = {}
190
191
192 @contexttype
193 class OtelContextType(ContextType):
194 type = "otel"
195 context_to_tag_mapping = {}
196
197
198 class Contexts(Interface):
199 """
200 This interface stores context specific information.
201 """
202
203 display_score = 1100
204 score = 800
205
206 @classmethod
207 def to_python(cls, data, **kwargs):
208 rv = {}
209
210 # Note the alias is the key of the context entry
211 for alias, value in data.items():
212 # XXX(markus): The `None`-case should be handled in the UI and
213 # other consumers of this interface
214 if value is not None:
215 rv[alias] = cls.normalize_context(alias, value)
216
217 return super().to_python(rv, **kwargs)
218
219 @classmethod
220 def normalize_context(cls, alias, data):
221 ctx_type = data.get("type", alias)
222 ctx_cls = context_types.get(ctx_type, DefaultContextType)
223 return ctx_cls(alias, data)
224
225 def iter_contexts(self):
226 return self._data.values()
227
228 def to_json(self):
229 rv = {}
230 for alias, inst in self._data.items():
231 rv[alias] = inst.to_json()
232 return rv
233
234 def iter_tags(self):
235 for inst in self.iter_contexts():
236 yield from inst.iter_tags()
237
[end of src/sentry/interfaces/contexts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/interfaces/contexts.py b/src/sentry/interfaces/contexts.py
--- a/src/sentry/interfaces/contexts.py
+++ b/src/sentry/interfaces/contexts.py
@@ -93,6 +93,9 @@
# we still want to display the info the UI
if value is not None:
ctx_data[force_str(key)] = value
+ # Numbers exceeding 15 place values will be converted to strings to avoid rendering issues
+ if isinstance(value, (int, float)) and len(str_value := force_str(value)) > 15:
+ ctx_data[force_str(key)] = str_value
self.data = ctx_data
def to_json(self):
| {"golden_diff": "diff --git a/src/sentry/interfaces/contexts.py b/src/sentry/interfaces/contexts.py\n--- a/src/sentry/interfaces/contexts.py\n+++ b/src/sentry/interfaces/contexts.py\n@@ -93,6 +93,9 @@\n # we still want to display the info the UI\n if value is not None:\n ctx_data[force_str(key)] = value\n+ # Numbers exceeding 15 place values will be converted to strings to avoid rendering issues\n+ if isinstance(value, (int, float)) and len(str_value := force_str(value)) > 15:\n+ ctx_data[force_str(key)] = str_value\n self.data = ctx_data\n \n def to_json(self):\n", "issue": "Incorrect data in events when adding large integers to context data \nSentry is treating big values differently in tags and context causing some values to be in correct after event processing. Below is an example event I sent using the ruby SDK, the same behaviour happens in Python as well: \r\n\r\nScript:\r\n```\r\n::Sentry.init do |config|\r\n config.dsn = \"_MY_DSN_\"\r\nend\r\n\r\nSentry.set_tags('bigNumber': 608548899684111178)\r\nSentry.configure_scope do |scope|\r\n scope.set_context(\r\n 'arguments',\r\n {\r\n name: 'A big value',\r\n age: 608548899684111178,\r\n }\r\n )\r\nend\r\n\r\nSentry.capture_message(\"le big number test\")\r\n```\r\n\r\nI expect to see both tag and context showing the same value, but in the context session it is showing as `608548899684111200` This is a common issue with large integer/floating point in Node. \r\n\r\nA possible workaround is to add the value as a string when it is added manually, but it is not applicable when the data is automatically added by the SDK.\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport string\nfrom typing import ClassVar, TypeVar\n\nfrom django.utils.encoding import force_str\n\nfrom sentry.interfaces.base import Interface\nfrom sentry.utils.json import prune_empty_keys\nfrom sentry.utils.safe import get_path\n\n__all__ = (\"Contexts\",)\n\nContextTypeT = TypeVar(\"ContextTypeT\", bound=\"ContextType\")\n\ncontext_types: dict[str, type[ContextType]] = {}\n\n\nclass _IndexFormatter(string.Formatter):\n def format_field(self, value, format_spec):\n if not format_spec and isinstance(value, bool):\n return value and \"yes\" or \"no\"\n return string.Formatter.format_field(self, value, format_spec)\n\n\ndef format_index_expr(format_string, data):\n return str(_IndexFormatter().vformat(str(format_string), (), data).strip())\n\n\ndef contexttype(cls: type[ContextTypeT]) -> type[ContextTypeT]:\n context_types[cls.type] = cls\n return cls\n\n\n# NOTE: Are you adding a new context? Make sure to also update the\n# documentation in the sentry develop docs [0]!\n#\n# [0]: https://develop.sentry.dev/sdk/event-payloads/contexts\n\n\nclass ContextType:\n context_to_tag_mapping: ClassVar[dict[str, str]] = {}\n \"\"\"\n This indicates which fields should be promoted into tags during event\n normalization. 
(See EventManager)\n\n The key for each entry is used as the name of the tag suffixed by the\n \"alias\" of the context (this is the key of the context in the contexts\n object, it is NOT the `type` of the context, though they are often the\n same).\n\n The value is a format string spec that uses python string.Formatter to\n interpolate any value from the context object.\n\n There is one special case:\n\n - When the key of the mapping is an empty string the tag name will simply be\n the alias.\n\n For example if you have a context named \"myContext\" with the data:\n\n ```json\n \"myContext\": {\n \"some_value\": \"hello world\",\n \"subkey\": \"whatever\",\n \"type\": \"myContext\"\n }\n ```\n\n and you have a context_to_tag_mapping that looks like\n\n ```python\n context_to_tag_mapping = {\"\": \"{some_value}\", \"subkey\": \"{subkey}\"}\n ```\n\n Then normalization will result in two tags being promoted:\n\n - myContext: \"hello world\"\n - myContext.subkey: \"whatever\"\n \"\"\"\n\n type: str\n \"\"\"This should match the `type` key in context object\"\"\"\n\n def __init__(self, alias, data):\n self.alias = alias\n ctx_data = {}\n for key, value in data.items():\n # we use a simple check here, rather than ' in set()' to avoid\n # issues with maps/lists.\n\n # Even if the value is an empty string,\n # we still want to display the info the UI\n if value is not None:\n ctx_data[force_str(key)] = value\n self.data = ctx_data\n\n def to_json(self):\n rv = dict(self.data)\n rv[\"type\"] = self.type\n return prune_empty_keys(rv)\n\n @classmethod\n def values_for_data(cls, data):\n rv = []\n for context in (data.get(\"contexts\") or {}).values():\n if context and context.get(\"type\") == cls.type:\n rv.append(context)\n return rv\n\n @classmethod\n def primary_value_for_data(cls, data):\n val = get_path(data, \"contexts\", cls.type)\n if val and val.get(\"type\") == cls.type:\n return val\n\n rv = cls.values_for_data(data)\n if len(rv) == 1:\n return rv[0]\n\n def iter_tags(self):\n if self.context_to_tag_mapping:\n for field, f_string in self.context_to_tag_mapping.items():\n try:\n value = format_index_expr(f_string, self.data)\n except KeyError:\n continue\n if value:\n if not field:\n yield (self.alias, value)\n else:\n yield (f\"{self.alias}.{field}\", value)\n\n\n# TODO(dcramer): contexts need to document/describe expected (optional) fields\n@contexttype\nclass DefaultContextType(ContextType):\n type = \"default\"\n\n\n@contexttype\nclass AppContextType(ContextType):\n type = \"app\"\n context_to_tag_mapping = {\"device\": \"{device_app_hash}\"}\n\n\n@contexttype\nclass DeviceContextType(ContextType):\n type = \"device\"\n context_to_tag_mapping = {\"\": \"{model}\", \"family\": \"{family}\"}\n # model_id, arch\n\n\n@contexttype\nclass RuntimeContextType(ContextType):\n type = \"runtime\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\"}\n\n\n@contexttype\nclass BrowserContextType(ContextType):\n type = \"browser\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\"}\n # viewport\n\n\n@contexttype\nclass OsContextType(ContextType):\n type = \"os\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\", \"rooted\": \"{rooted}\"}\n # build, rooted\n\n\n@contexttype\nclass GpuContextType(ContextType):\n type = \"gpu\"\n context_to_tag_mapping = {\"name\": \"{name}\", \"vendor\": \"{vendor_name}\"}\n\n\n@contexttype\nclass MonitorContextType(ContextType):\n type = \"monitor\"\n context_to_tag_mapping = {\"id\": 
\"{id}\", \"slug\": \"{slug}\"}\n\n\n@contexttype\nclass TraceContextType(ContextType):\n type = \"trace\"\n context_to_tag_mapping = {}\n\n\n@contexttype\nclass OtelContextType(ContextType):\n type = \"otel\"\n context_to_tag_mapping = {}\n\n\nclass Contexts(Interface):\n \"\"\"\n This interface stores context specific information.\n \"\"\"\n\n display_score = 1100\n score = 800\n\n @classmethod\n def to_python(cls, data, **kwargs):\n rv = {}\n\n # Note the alias is the key of the context entry\n for alias, value in data.items():\n # XXX(markus): The `None`-case should be handled in the UI and\n # other consumers of this interface\n if value is not None:\n rv[alias] = cls.normalize_context(alias, value)\n\n return super().to_python(rv, **kwargs)\n\n @classmethod\n def normalize_context(cls, alias, data):\n ctx_type = data.get(\"type\", alias)\n ctx_cls = context_types.get(ctx_type, DefaultContextType)\n return ctx_cls(alias, data)\n\n def iter_contexts(self):\n return self._data.values()\n\n def to_json(self):\n rv = {}\n for alias, inst in self._data.items():\n rv[alias] = inst.to_json()\n return rv\n\n def iter_tags(self):\n for inst in self.iter_contexts():\n yield from inst.iter_tags()\n", "path": "src/sentry/interfaces/contexts.py"}]} | 3,061 | 153 |
gh_patches_debug_29778 | rasdani/github-patches | git_diff | vaexio__vaex-1027 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow lazy file opening for parquet formatted files
When I run `open` on an HDF5 file, the operation is done lazily – I believe that file handlers are opened but nothing is loaded into RAM. However running `open` on a parquet file results in eager evaluation, where data is immediately loaded into RAM. (Same goes for `open_many`, FWIW). This means that it's impossible to use vaex on large-than-RAM datasets if they're stored as parquet.
I've put a minimal working example below, although note it'll use like 3-6GB of RAM and dump like 5 GB to your HD:
```python
import numpy as np
import pandas as pd
import time
import vaex
# Creates a roughly 2.4 GB dataframe
pandas_df = pd.DataFrame({
"numeric_1": np.random.rand(100000000),
"numeric_2": np.random.rand(100000000),
"numeric_3": np.random.rand(100000000)
})
# Saves to parquet and vaex-formatted hdf5
pandas_df.to_parquet("temp_df.parquet", index=False)
vaex_df = vaex.from_pandas(pandas_df)
vaex_df.export_hdf5("temp_df.hdf5")
del vaex_df
del pandas_df
start = time.time()
vaex_df_hdf5 = vaex.open("temp_df.hdf5")
print(f"Took {time.time() - start:.2f} seconds to open the hdf5 file")
start = time.time()
vaex_df_parquet = vaex.open("temp_df.parquet")
print(f"Took {time.time() - start:.2f} seconds to open the parquet file")
```
Output on my machine:
```
Took 0.01 seconds to open the hdf5 file
Took 7.89 seconds to open the parquet file
```
(You can also see that RAM usage is up, although printing that to screen from python is trickier 😄.)
I have no idea if it's possible, but it would be really awesome to get the same lazy opening for parquet files – A fair amount of the files I interact with are stored as parquet, and converting to HDF5 would be time consuming and annoying.
</issue>
<code>
[start of packages/vaex-core/vaex/arrow/dataset.py]
1 __author__ = 'maartenbreddels'
2 import collections
3 import concurrent.futures
4 import logging
5 import multiprocessing
6 import os
7
8 import pyarrow as pa
9 import pyarrow.dataset
10
11 import vaex.dataset
12 import vaex.file.other
13 from ..itertools import buffer
14
15
16 logger = logging.getLogger("vaex.arrow.dataset")
17
18 thread_count_default_io = os.environ.get('VAEX_NUM_THREADS_IO', multiprocessing.cpu_count() * 2 + 1)
19 thread_count_default_io = int(thread_count_default_io)
20 main_io_pool = None
21
22 logger = logging.getLogger("vaex.multithreading")
23
24
25 def get_main_io_pool():
26 global main_io_pool
27 if main_io_pool is None:
28 main_io_pool = concurrent.futures.ThreadPoolExecutor(max_workers=thread_count_default_io)
29 return main_io_pool
30
31
32 class DatasetArrow(vaex.dataset.Dataset):
33 def __init__(self, ds):
34 super().__init__()
35 self._arrow_ds = ds
36 row_count = 0
37 for fragment in self._arrow_ds.get_fragments():
38 if hasattr(fragment, "ensure_complete_metadata"):
39 fragment.ensure_complete_metadata()
40 for rg in fragment.row_groups:
41 row_count += rg.num_rows
42 self._row_count = row_count
43 self._columns = {name: vaex.dataset.ColumnProxy(self, name, type) for name, type in
44 zip(self._arrow_ds.schema.names, self._arrow_ds.schema.types)}
45 self._ids = {}
46
47 def hashed(self):
48 raise NotImplementedError
49
50 def slice(self, start, end):
51 # TODO: we can be smarter here, and trim off some fragments
52 if start == 0 and end == self.row_count:
53 return self
54 return vaex.dataset.DatasetSliced(self, start=start, end=end)
55
56 def is_masked(self, column):
57 return False
58
59 def shape(self, column):
60 return tuple()
61
62 def __getitem__(self, item):
63 if isinstance(item, slice):
64 assert item.step in [1, None]
65 return vaex.dataset.DatasetSliced(self, item.start or 0, item.stop or self.row_count)
66 return self._columns[item]
67
68 def close(self):
69 # no need to close it, it seem
70 pass
71
72 def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):
73 pool = get_main_io_pool()
74 offset = 0
75 for fragment_large in self._arrow_ds.get_fragments():
76 fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])
77 # when do we want to split up? File size? max chunk size?
78 # if fragment_large_rows > chunk_size:
79 # fragments = fragment_large.split_by_row_group()
80 # else:
81 # # or not
82 # fragments = [fragment_large]
83 import pyarrow.parquet
84 fragments = [fragment_large]
85 for fragment in fragments:
86 rows = sum([rg.num_rows for rg in fragment.row_groups])
87 chunk_start = offset
88 chunk_end = offset + rows
89
90 length = chunk_end - chunk_start # default length
91
92 if start >= chunk_end: # we didn't find the beginning yet
93 offset += length
94 continue
95 if end < chunk_start: # we are past the end
96 # assert False
97 break
98 def reader(fragment=fragment):
99 table = fragment.to_table(columns=columns, use_threads=False)
100 chunks = dict(zip(table.column_names, table.columns))
101 return chunks
102
103 if start > chunk_start:
104 # this means we have to cut off a piece of the beginning
105 if end < chunk_end:
106 # AND the end
107 length = end - chunk_start # without the start cut off
108 length -= start - chunk_start # correcting for the start cut off
109 def slicer(chunk_start=chunk_start, reader=reader, length=length):
110 chunks = reader()
111 chunks = {name: ar.slice(start - chunk_start, length) for name, ar in chunks.items()}
112 for name, ar in chunks.items():
113 assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
114 return chunks
115 reader = slicer
116 else:
117 length -= start - chunk_start # correcting for the start cut off
118 def slicer(chunk_start=chunk_start, reader=reader, length=length):
119 chunks = reader()
120 chunks = {name: ar.slice(start - chunk_start) for name, ar in chunks.items()}
121 for name, ar in chunks.items():
122 assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
123 return chunks
124 reader = slicer
125 else:
126 if end < chunk_end:
127 # we only need to cut off a piece of the end
128 length = end - chunk_start
129 def slicer(chunk_start=chunk_start, reader=reader, length=length):
130 chunks = reader()
131 chunks = {name: ar.slice(0, length) for name, ar in chunks.items()}
132 for name, ar in chunks.items():
133 assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
134 return chunks
135 reader = slicer
136 offset += rows
137 yield pool.submit(reader)
138
139 def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):
140 chunk_size = chunk_size or 1024*1024
141 i1 = 0
142 chunks_ready_list = []
143 i1 = i2 = 0
144
145 for chunks_future in buffer(self._chunk_producer(columns, chunk_size, start=start, end=end or self._row_count), thread_count_default_io+3):
146 chunks = chunks_future.result()
147 chunks_ready_list.append(chunks)
148 total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])
149 if total_row_count > chunk_size:
150 chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
151 i2 += current_row_count
152 yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)
153 i1 = i2
154
155 while chunks_ready_list:
156 chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
157 i2 += current_row_count
158 yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)
159 i1 = i2
160
161
162
163 def from_table(table, as_numpy=False):
164 columns = dict(zip(table.schema.names, table.columns))
165 # TODO: this should be an DatasetArrow and/or DatasetParquet
166 dataset = vaex.dataset.DatasetArrays(columns)
167 df = vaex.dataframe.DataFrameLocal(dataset)
168 return df.as_numpy() if as_numpy else df
169
170
171 def open(filename, as_numpy=False):
172 source = pa.memory_map(filename)
173 try:
174 # first we try if it opens as stream
175 reader = pa.ipc.open_stream(source)
176 except pa.lib.ArrowInvalid:
177 # if not, we open as file
178 reader = pa.ipc.open_file(source)
179 # for some reason this reader is not iterable
180 batches = [reader.get_batch(i) for i in range(reader.num_record_batches)]
181 else:
182 # if a stream, we're good
183 batches = reader # this reader is iterable
184 table = pa.Table.from_batches(batches)
185 return from_table(table, as_numpy=as_numpy)
186
187
188 def open_parquet(filename, as_numpy=False):
189 arrow_ds = pyarrow.dataset.dataset(filename)
190 ds = DatasetArrow(arrow_ds)
191 return vaex.from_dataset(ds)
192
193 # vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
194 # vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
195
196
[end of packages/vaex-core/vaex/arrow/dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-core/vaex/arrow/dataset.py b/packages/vaex-core/vaex/arrow/dataset.py
--- a/packages/vaex-core/vaex/arrow/dataset.py
+++ b/packages/vaex-core/vaex/arrow/dataset.py
@@ -30,8 +30,9 @@
class DatasetArrow(vaex.dataset.Dataset):
- def __init__(self, ds):
+ def __init__(self, ds, max_rows_read=1024**2*10):
super().__init__()
+ self.max_rows_read = max_rows_read
self._arrow_ds = ds
row_count = 0
for fragment in self._arrow_ds.get_fragments():
@@ -70,18 +71,15 @@
pass
def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):
+ import pyarrow.parquet
pool = get_main_io_pool()
offset = 0
for fragment_large in self._arrow_ds.get_fragments():
fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])
- # when do we want to split up? File size? max chunk size?
- # if fragment_large_rows > chunk_size:
- # fragments = fragment_large.split_by_row_group()
- # else:
- # # or not
- # fragments = [fragment_large]
- import pyarrow.parquet
fragments = [fragment_large]
+ # when do we want to split up? File size? max chunk size?
+ if fragment_large_rows > self.max_rows_read:
+ fragments = fragment_large.split_by_row_group()
for fragment in fragments:
rows = sum([rg.num_rows for rg in fragment.row_groups])
chunk_start = offset
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/arrow/dataset.py b/packages/vaex-core/vaex/arrow/dataset.py\n--- a/packages/vaex-core/vaex/arrow/dataset.py\n+++ b/packages/vaex-core/vaex/arrow/dataset.py\n@@ -30,8 +30,9 @@\n \n \n class DatasetArrow(vaex.dataset.Dataset):\n- def __init__(self, ds):\n+ def __init__(self, ds, max_rows_read=1024**2*10):\n super().__init__()\n+ self.max_rows_read = max_rows_read\n self._arrow_ds = ds\n row_count = 0\n for fragment in self._arrow_ds.get_fragments():\n@@ -70,18 +71,15 @@\n pass\n \n def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n+ import pyarrow.parquet\n pool = get_main_io_pool()\n offset = 0\n for fragment_large in self._arrow_ds.get_fragments():\n fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])\n- # when do we want to split up? File size? max chunk size?\n- # if fragment_large_rows > chunk_size:\n- # fragments = fragment_large.split_by_row_group()\n- # else:\n- # # or not\n- # fragments = [fragment_large]\n- import pyarrow.parquet\n fragments = [fragment_large]\n+ # when do we want to split up? File size? max chunk size?\n+ if fragment_large_rows > self.max_rows_read:\n+ fragments = fragment_large.split_by_row_group()\n for fragment in fragments:\n rows = sum([rg.num_rows for rg in fragment.row_groups])\n chunk_start = offset\n", "issue": "Allow lazy file opening for parquet formatted files\nWhen I run `open` on an HDF5 file, the operation is done lazily \u2013\u00a0I believe that file handlers are opened but nothing is loaded into RAM. However running `open` on a parquet file results in eager evaluation, where data is immediately loaded into RAM. (Same goes for `open_many`, FWIW). This means that it's impossible to use vaex on large-than-RAM datasets if they're stored as parquet.\r\n\r\nI've put a minimal working example below, although note it'll use like 3-6GB of RAM and dump like 5 GB to your HD:\r\n\r\n```python\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\nimport vaex\r\n\r\n# Creates a roughly 2.4 GB dataframe\r\npandas_df = pd.DataFrame({\r\n \"numeric_1\": np.random.rand(100000000),\r\n \"numeric_2\": np.random.rand(100000000),\r\n \"numeric_3\": np.random.rand(100000000)\r\n})\r\n\r\n# Saves to parquet and vaex-formatted hdf5\r\npandas_df.to_parquet(\"temp_df.parquet\", index=False)\r\nvaex_df = vaex.from_pandas(pandas_df)\r\nvaex_df.export_hdf5(\"temp_df.hdf5\")\r\n\r\ndel vaex_df\r\ndel pandas_df\r\n\r\nstart = time.time()\r\nvaex_df_hdf5 = vaex.open(\"temp_df.hdf5\")\r\nprint(f\"Took {time.time() - start:.2f} seconds to open the hdf5 file\")\r\n\r\nstart = time.time()\r\nvaex_df_parquet = vaex.open(\"temp_df.parquet\")\r\nprint(f\"Took {time.time() - start:.2f} seconds to open the parquet file\")\r\n```\r\n\r\nOutput on my machine:\r\n```\r\nTook 0.01 seconds to open the hdf5 file\r\nTook 7.89 seconds to open the parquet file\r\n```\r\n\r\n(You can also see that RAM usage is up, although printing that to screen from python is trickier \ud83d\ude04.)\r\n\r\nI have no idea if it's possible, but it would be really awesome to get the same lazy opening for parquet files \u2013\u00a0A fair amount of the files I interact with are stored as parquet, and converting to HDF5 would be time consuming and annoying. 
\n", "before_files": [{"content": "__author__ = 'maartenbreddels'\nimport collections\nimport concurrent.futures\nimport logging\nimport multiprocessing\nimport os\n\nimport pyarrow as pa\nimport pyarrow.dataset\n\nimport vaex.dataset\nimport vaex.file.other\nfrom ..itertools import buffer\n\n\nlogger = logging.getLogger(\"vaex.arrow.dataset\")\n\nthread_count_default_io = os.environ.get('VAEX_NUM_THREADS_IO', multiprocessing.cpu_count() * 2 + 1)\nthread_count_default_io = int(thread_count_default_io)\nmain_io_pool = None\n\nlogger = logging.getLogger(\"vaex.multithreading\")\n\n\ndef get_main_io_pool():\n global main_io_pool\n if main_io_pool is None:\n main_io_pool = concurrent.futures.ThreadPoolExecutor(max_workers=thread_count_default_io)\n return main_io_pool\n\n\nclass DatasetArrow(vaex.dataset.Dataset):\n def __init__(self, ds):\n super().__init__()\n self._arrow_ds = ds\n row_count = 0\n for fragment in self._arrow_ds.get_fragments():\n if hasattr(fragment, \"ensure_complete_metadata\"):\n fragment.ensure_complete_metadata()\n for rg in fragment.row_groups:\n row_count += rg.num_rows\n self._row_count = row_count\n self._columns = {name: vaex.dataset.ColumnProxy(self, name, type) for name, type in\n zip(self._arrow_ds.schema.names, self._arrow_ds.schema.types)}\n self._ids = {}\n\n def hashed(self):\n raise NotImplementedError\n\n def slice(self, start, end):\n # TODO: we can be smarter here, and trim off some fragments\n if start == 0 and end == self.row_count:\n return self\n return vaex.dataset.DatasetSliced(self, start=start, end=end)\n\n def is_masked(self, column):\n return False\n\n def shape(self, column):\n return tuple()\n\n def __getitem__(self, item):\n if isinstance(item, slice):\n assert item.step in [1, None]\n return vaex.dataset.DatasetSliced(self, item.start or 0, item.stop or self.row_count)\n return self._columns[item]\n\n def close(self):\n # no need to close it, it seem\n pass\n\n def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n pool = get_main_io_pool()\n offset = 0\n for fragment_large in self._arrow_ds.get_fragments():\n fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])\n # when do we want to split up? File size? 
max chunk size?\n # if fragment_large_rows > chunk_size:\n # fragments = fragment_large.split_by_row_group()\n # else:\n # # or not\n # fragments = [fragment_large]\n import pyarrow.parquet\n fragments = [fragment_large]\n for fragment in fragments:\n rows = sum([rg.num_rows for rg in fragment.row_groups])\n chunk_start = offset\n chunk_end = offset + rows\n\n length = chunk_end - chunk_start # default length\n\n if start >= chunk_end: # we didn't find the beginning yet\n offset += length\n continue\n if end < chunk_start: # we are past the end\n # assert False\n break\n def reader(fragment=fragment):\n table = fragment.to_table(columns=columns, use_threads=False)\n chunks = dict(zip(table.column_names, table.columns))\n return chunks\n\n if start > chunk_start:\n # this means we have to cut off a piece of the beginning\n if end < chunk_end:\n # AND the end\n length = end - chunk_start # without the start cut off\n length -= start - chunk_start # correcting for the start cut off\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(start - chunk_start, length) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n else:\n length -= start - chunk_start # correcting for the start cut off\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(start - chunk_start) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n else:\n if end < chunk_end:\n # we only need to cut off a piece of the end\n length = end - chunk_start\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(0, length) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n offset += rows\n yield pool.submit(reader)\n\n def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n chunk_size = chunk_size or 1024*1024\n i1 = 0\n chunks_ready_list = []\n i1 = i2 = 0\n\n for chunks_future in buffer(self._chunk_producer(columns, chunk_size, start=start, end=end or self._row_count), thread_count_default_io+3):\n chunks = chunks_future.result()\n chunks_ready_list.append(chunks)\n total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])\n if total_row_count > chunk_size:\n chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)\n i2 += current_row_count\n yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)\n i1 = i2\n\n while chunks_ready_list:\n chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)\n i2 += current_row_count\n yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)\n i1 = i2\n\n\n\ndef from_table(table, as_numpy=False):\n columns = dict(zip(table.schema.names, table.columns))\n # TODO: this should be an DatasetArrow and/or DatasetParquet\n dataset = vaex.dataset.DatasetArrays(columns)\n df = vaex.dataframe.DataFrameLocal(dataset)\n return df.as_numpy() if as_numpy else df\n\n\ndef open(filename, as_numpy=False):\n source = 
pa.memory_map(filename)\n try:\n # first we try if it opens as stream\n reader = pa.ipc.open_stream(source)\n except pa.lib.ArrowInvalid:\n # if not, we open as file\n reader = pa.ipc.open_file(source)\n # for some reason this reader is not iterable\n batches = [reader.get_batch(i) for i in range(reader.num_record_batches)]\n else:\n # if a stream, we're good\n batches = reader # this reader is iterable\n table = pa.Table.from_batches(batches)\n return from_table(table, as_numpy=as_numpy)\n\n\ndef open_parquet(filename, as_numpy=False):\n arrow_ds = pyarrow.dataset.dataset(filename)\n ds = DatasetArrow(arrow_ds)\n return vaex.from_dataset(ds)\n\n# vaex.file.other.dataset_type_map[\"arrow\"] = DatasetArrow\n# vaex.file.other.dataset_type_map[\"parquet\"] = DatasetParquet\n\n", "path": "packages/vaex-core/vaex/arrow/dataset.py"}]} | 3,262 | 404 |
gh_patches_debug_7686 | rasdani/github-patches | git_diff | ivy-llc__ivy-22038 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
max_pool3d
</issue>
<code>
[start of ivy/functional/frontends/torch/nn/functional/pooling_functions.py]
1 # global
2 from functools import reduce
3
4 # local
5 import ivy
6 from ivy import with_unsupported_dtypes
7 from ivy.functional.frontends.torch.func_wrapper import (
8 to_ivy_arrays_and_back,
9 )
10
11
12 # --- Helpers --- #
13 # --------------- #
14
15
16 def _broadcast_pooling_helper(x, pool_dims: str = "2d", name: str = "padding"):
17 dims = {"1d": 1, "2d": 2, "3d": 3}
18
19 if isinstance(x, int):
20 return tuple([x for _ in range(dims[pool_dims])])
21
22 if len(x) == 1:
23 return tuple([x[0] for _ in range(dims[pool_dims])])
24 elif len(x) == dims[pool_dims]:
25 return tuple(x)
26 elif len(x) != dims[pool_dims]:
27 raise ValueError(
28 f"`{name}` must either be a single int, "
29 f"or a tuple of {dims[pool_dims]} ints. "
30 )
31
32
33 # --- Main --- #
34 # ------------ #
35
36
37 @with_unsupported_dtypes(
38 {
39 "2.0.1 and below": (
40 "bfloat16",
41 "float16",
42 )
43 },
44 "torch",
45 )
46 @to_ivy_arrays_and_back
47 def adaptive_avg_pool1d(input, output_size):
48 return ivy.adaptive_avg_pool1d(input, output_size)
49
50
51 @with_unsupported_dtypes(
52 {
53 "2.0.1 and below": (
54 "float16",
55 "bfloat16",
56 )
57 },
58 "torch",
59 )
60 @to_ivy_arrays_and_back
61 def adaptive_avg_pool2d(input, output_size):
62 return ivy.adaptive_avg_pool2d(input, output_size)
63
64
65 @to_ivy_arrays_and_back
66 def adaptive_max_pool2d(
67 input,
68 output_size,
69 return_indices=False,
70 ):
71 # ToDo: Add return_indices once superset is implemented
72 return ivy.adaptive_max_pool2d(input, output_size)
73
74
75 @to_ivy_arrays_and_back
76 def avg_pool1d(
77 input,
78 kernel_size,
79 stride=None,
80 padding=0,
81 ceil_mode=False,
82 count_include_pad=True,
83 ):
84 if stride is None:
85 stride = kernel_size
86 data_format = "NCW"
87 # TODO: remove the broadcasting and padding string specification when ivy.avg_pool
88 # support explicit padding
89 kernel_size = _broadcast_pooling_helper(kernel_size, "1d", name="kernel_size")
90 padding = _broadcast_pooling_helper(padding, "1d", name="padding")
91 if all(
92 [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]
93 ):
94 padding = "SAME"
95 else:
96 padding = "VALID"
97 return ivy.avg_pool1d(
98 input,
99 kernel_size,
100 stride,
101 padding,
102 data_format=data_format,
103 count_include_pad=count_include_pad,
104 ceil_mode=ceil_mode,
105 )
106
107
108 @to_ivy_arrays_and_back
109 def avg_pool2d(
110 input,
111 kernel_size,
112 stride=None,
113 padding=0,
114 ceil_mode=False,
115 count_include_pad=True,
116 divisor_override=None,
117 ):
118 if stride is None:
119 stride = kernel_size
120 data_format = "NCHW"
121 # TODO: remove the broadcasting and padding string specification when ivy.avg_pool
122 # support explicit padding
123 kernel_size = _broadcast_pooling_helper(kernel_size, "2d", name="kernel_size")
124 padding = _broadcast_pooling_helper(padding, "2d", name="padding")
125 if all(
126 [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]
127 ):
128 padding = "SAME"
129 else:
130 padding = "VALID"
131 return ivy.avg_pool2d(
132 input,
133 kernel_size,
134 stride,
135 padding,
136 data_format=data_format,
137 ceil_mode=ceil_mode,
138 count_include_pad=count_include_pad,
139 divisor_override=divisor_override,
140 )
141
142
143 @to_ivy_arrays_and_back
144 def avg_pool3d(
145 input,
146 kernel_size,
147 stride=None,
148 padding=0,
149 ceil_mode=False,
150 count_include_pad=True,
151 divisor_override=None,
152 ):
153 if stride is None:
154 stride = kernel_size
155 # TODO: remove the broadcasting and padding string specification when ivy.avg_pool
156 # support explicit padding
157 kernel_size = _broadcast_pooling_helper(kernel_size, "3d", name="kernel_size")
158 padding = _broadcast_pooling_helper(padding, "3d", name="padding")
159 if all(
160 [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]
161 ):
162 padding = "SAME"
163 else:
164 padding = "VALID"
165 return ivy.avg_pool3d(
166 input,
167 kernel_size,
168 stride,
169 padding,
170 data_format="NCDHW",
171 ceil_mode=ceil_mode,
172 count_include_pad=count_include_pad,
173 divisor_override=divisor_override,
174 )
175
176
177 @with_unsupported_dtypes(
178 {
179 "2.0.1 and below": (
180 "float16",
181 "bfloat16",
182 )
183 },
184 "torch",
185 )
186 @to_ivy_arrays_and_back
187 def lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
188 data_format = "NCW"
189 padding = "VALID"
190 if stride is None:
191 stride = kernel_size
192 if not isinstance(kernel_size, int):
193 kernel_mul = reduce(lambda x, y: x * y, kernel_size)
194 else:
195 kernel_mul = kernel_size
196
197 out = ivy.avg_pool1d(
198 ivy.pow(input, norm_type),
199 kernel_size,
200 stride,
201 padding,
202 data_format=data_format,
203 ceil_mode=ceil_mode,
204 )
205 p = 1.0 / norm_type if norm_type != 0 else 1.0
206 return ivy.pow(ivy.multiply(out, kernel_mul), p)
207
208
209 @to_ivy_arrays_and_back
210 def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
211 data_format = "NCHW"
212 padding = "VALID"
213 if stride is None:
214 stride = kernel_size
215 out = ivy.avg_pool2d(
216 ivy.pow(input, norm_type),
217 kernel_size,
218 stride,
219 padding,
220 data_format=data_format,
221 ceil_mode=ceil_mode,
222 )
223 if not isinstance(kernel_size, int):
224 kernel_mul = reduce(lambda x, y: x * y, kernel_size)
225 else:
226 kernel_mul = kernel_size
227 p = ivy.divide(1.0, norm_type) if norm_type != 0 else 1.0
228 return ivy.pow(ivy.multiply(out, kernel_mul), p).astype(input.dtype)
229
230
231 @to_ivy_arrays_and_back
232 def max_pool1d(
233 input,
234 kernel_size,
235 stride=None,
236 padding=0,
237 ceil_mode=False,
238 dilation=1,
239 return_indices=False,
240 ):
241 if stride is None:
242 stride = kernel_size
243 data_format = "NCW"
244 return ivy.max_pool1d(
245 input,
246 kernel_size,
247 stride,
248 padding,
249 data_format=data_format,
250 dilation=dilation,
251 ceil_mode=ceil_mode,
252 )
253
254
255 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch")
256 @to_ivy_arrays_and_back
257 def max_pool2d(
258 input,
259 kernel_size,
260 stride=None,
261 padding=0,
262 dilation=1,
263 ceil_mode=False,
264 return_indices=False,
265 ):
266 if stride is None:
267 stride = kernel_size
268 return ivy.max_pool2d(
269 input,
270 kernel_size,
271 stride,
272 padding,
273 data_format="NCHW",
274 dilation=dilation,
275 ceil_mode=ceil_mode,
276 )
277
[end of ivy/functional/frontends/torch/nn/functional/pooling_functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
--- a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
+++ b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
@@ -274,3 +274,28 @@
dilation=dilation,
ceil_mode=ceil_mode,
)
+
+
+@with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch")
+@to_ivy_arrays_and_back
+def max_pool3d(
+ input,
+ kernel_size,
+ stride=None,
+ padding=0,
+ dilation=1,
+ ceil_mode=False,
+ return_indices=False,
+):
+ if stride is None:
+ stride = kernel_size
+
+ return ivy.max_pool3d(
+ input,
+ kernel_size,
+ stride,
+ padding,
+ data_format="NCDHW",
+ dilation=dilation,
+ ceil_mode=ceil_mode,
+ )
| {"golden_diff": "diff --git a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n--- a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n+++ b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n@@ -274,3 +274,28 @@\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n+\n+\n+@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n+@to_ivy_arrays_and_back\n+def max_pool3d(\n+ input,\n+ kernel_size,\n+ stride=None,\n+ padding=0,\n+ dilation=1,\n+ ceil_mode=False,\n+ return_indices=False,\n+):\n+ if stride is None:\n+ stride = kernel_size\n+\n+ return ivy.max_pool3d(\n+ input,\n+ kernel_size,\n+ stride,\n+ padding,\n+ data_format=\"NCDHW\",\n+ dilation=dilation,\n+ ceil_mode=ceil_mode,\n+ )\n", "issue": "max_pool3d\n\n", "before_files": [{"content": "# global\nfrom functools import reduce\n\n# local\nimport ivy\nfrom ivy import with_unsupported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _broadcast_pooling_helper(x, pool_dims: str = \"2d\", name: str = \"padding\"):\n dims = {\"1d\": 1, \"2d\": 2, \"3d\": 3}\n\n if isinstance(x, int):\n return tuple([x for _ in range(dims[pool_dims])])\n\n if len(x) == 1:\n return tuple([x[0] for _ in range(dims[pool_dims])])\n elif len(x) == dims[pool_dims]:\n return tuple(x)\n elif len(x) != dims[pool_dims]:\n raise ValueError(\n f\"`{name}` must either be a single int, \"\n f\"or a tuple of {dims[pool_dims]} ints. \"\n )\n\n\n# --- Main --- #\n# ------------ #\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"bfloat16\",\n \"float16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool1d(input, output_size):\n return ivy.adaptive_avg_pool1d(input, output_size)\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool2d(input, output_size):\n return ivy.adaptive_avg_pool2d(input, output_size)\n\n\n@to_ivy_arrays_and_back\ndef adaptive_max_pool2d(\n input,\n output_size,\n return_indices=False,\n):\n # ToDo: Add return_indices once superset is implemented\n return ivy.adaptive_max_pool2d(input, output_size)\n\n\n@to_ivy_arrays_and_back\ndef avg_pool1d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCW\"\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = _broadcast_pooling_helper(kernel_size, \"1d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"1d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool1d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n count_include_pad=count_include_pad,\n ceil_mode=ceil_mode,\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCHW\"\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = 
_broadcast_pooling_helper(kernel_size, \"2d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"2d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n divisor_override=divisor_override,\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool3d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n if stride is None:\n stride = kernel_size\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = _broadcast_pooling_helper(kernel_size, \"3d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"3d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool3d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCDHW\",\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n divisor_override=divisor_override,\n )\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n data_format = \"NCW\"\n padding = \"VALID\"\n if stride is None:\n stride = kernel_size\n if not isinstance(kernel_size, int):\n kernel_mul = reduce(lambda x, y: x * y, kernel_size)\n else:\n kernel_mul = kernel_size\n\n out = ivy.avg_pool1d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n )\n p = 1.0 / norm_type if norm_type != 0 else 1.0\n return ivy.pow(ivy.multiply(out, kernel_mul), p)\n\n\n@to_ivy_arrays_and_back\ndef lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n data_format = \"NCHW\"\n padding = \"VALID\"\n if stride is None:\n stride = kernel_size\n out = ivy.avg_pool2d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n )\n if not isinstance(kernel_size, int):\n kernel_mul = reduce(lambda x, y: x * y, kernel_size)\n else:\n kernel_mul = kernel_size\n p = ivy.divide(1.0, norm_type) if norm_type != 0 else 1.0\n return ivy.pow(ivy.multiply(out, kernel_mul), p).astype(input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef max_pool1d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n dilation=1,\n return_indices=False,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCW\"\n return ivy.max_pool1d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n\n\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef max_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n dilation=1,\n ceil_mode=False,\n return_indices=False,\n):\n if stride is None:\n stride = kernel_size\n return ivy.max_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCHW\",\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n", "path": "ivy/functional/frontends/torch/nn/functional/pooling_functions.py"}]} | 3,055 | 260 |
gh_patches_debug_11674 | rasdani/github-patches | git_diff | sopel-irc__sopel-1270 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[wikipedia] Error on nonexistent article link
I productively (? :laughing:) found another issue in the `wikipedia` module while testing my fix for #1255. It turns out that, if someone sends a link to a Wikipedia article that doesn't exist, the bot spits out a nice `KeyError`. Better to spit out a clean error (or, perhaps, nothing).
As with #1255, I'm testing a fix for this on my own Sopel instance. Once I'm happy with it (and the wording of whatever error message I add), there will be a PR for feedback before merging.
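For illustration, a guard along these lines in `say_snippet()` would do it (the message wording here is just a placeholder, not the final text):

```python
def say_snippet(bot, server, query, show_url=True):
    page_name = query.replace('_', ' ')
    query = query.replace(' ', '_')
    try:
        snippet = mw_snippet(server, query)
    except KeyError:
        # The API response carries no extract for pages that don't exist.
        bot.say('[WIKIPEDIA] No article found for "{}".'.format(page_name))
        return
    msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
    if show_url:
        msg = msg + ' | https://{}/wiki/{}'.format(server, query)
    bot.say(msg)
```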
</issue>
<code>
[start of sopel/modules/wikipedia.py]
1 # coding=utf-8
2 # Copyright 2013 Elsie Powell - embolalia.com
3 # Licensed under the Eiffel Forum License 2.
4 from __future__ import unicode_literals, absolute_import, print_function, division
5 from sopel import web, tools
6 from sopel.config.types import StaticSection, ValidatedAttribute
7 from sopel.module import NOLIMIT, commands, example, rule
8 import json
9 import re
10
11 import sys
12 if sys.version_info.major < 3:
13 from urlparse import unquote as _unquote
14 unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')
15 else:
16 from urllib.parse import unquote
17
18 REDIRECT = re.compile(r'^REDIRECT (.*)')
19
20
21 class WikipediaSection(StaticSection):
22 default_lang = ValidatedAttribute('default_lang', default='en')
23 """The default language to find articles from."""
24 lang_per_channel = ValidatedAttribute('lang_per_channel')
25
26
27 def setup(bot):
28 bot.config.define_section('wikipedia', WikipediaSection)
29
30 regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')
31 if not bot.memory.contains('url_callbacks'):
32 bot.memory['url_callbacks'] = tools.SopelMemory()
33 bot.memory['url_callbacks'][regex] = mw_info
34
35
36 def configure(config):
37 config.define_section('wikipedia', WikipediaSection)
38 config.wikipedia.configure_setting(
39 'default_lang',
40 "Enter the default language to find articles from."
41 )
42
43
44 def mw_search(server, query, num):
45 """
46 Searches the specified MediaWiki server for the given query, and returns
47 the specified number of results.
48 """
49 search_url = ('http://%s/w/api.php?format=json&action=query'
50 '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'
51 '&srsearch=') % (server, num)
52 search_url += query
53 query = json.loads(web.get(search_url))
54 if 'query' in query:
55 query = query['query']['search']
56 return [r['title'] for r in query]
57 else:
58 return None
59
60
61 def say_snippet(bot, server, query, show_url=True):
62 page_name = query.replace('_', ' ')
63 query = query.replace(' ', '_')
64 snippet = mw_snippet(server, query)
65 msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
66 if show_url:
67 msg = msg + ' | https://{}/wiki/{}'.format(server, query)
68 bot.say(msg)
69
70
71 def mw_snippet(server, query):
72 """
73 Retrives a snippet of the specified length from the given page on the given
74 server.
75 """
76 snippet_url = ('https://' + server + '/w/api.php?format=json'
77 '&action=query&prop=extracts&exintro&explaintext'
78 '&exchars=300&redirects&titles=')
79 snippet_url += query
80 snippet = json.loads(web.get(snippet_url))
81 snippet = snippet['query']['pages']
82
83 # For some reason, the API gives the page *number* as the key, so we just
84 # grab the first page number in the results.
85 snippet = snippet[list(snippet.keys())[0]]
86
87 return snippet['extract']
88
89
90 @rule('.*\/([a-z]+\.wikipedia.org)\/wiki\/((?!File\:)[^ ]+).*')
91 def mw_info(bot, trigger, found_match=None):
92 """
93 Retrives a snippet of the specified length from the given page on the given
94 server.
95 """
96 match = found_match or trigger
97 say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)
98
99
100 @commands('w', 'wiki', 'wik')
101 @example('.w San Francisco')
102 def wikipedia(bot, trigger):
103 lang = bot.config.wikipedia.default_lang
104
105 #change lang if channel has custom language set
106 if (trigger.sender and not trigger.sender.is_nick() and
107 bot.config.wikipedia.lang_per_channel):
108 customlang = re.search('(' + trigger.sender + '):(\w+)',
109 bot.config.wikipedia.lang_per_channel)
110 if customlang is not None:
111 lang = customlang.group(2)
112
113 if trigger.group(2) is None:
114 bot.reply("What do you want me to look up?")
115 return NOLIMIT
116
117 query = trigger.group(2)
118 args = re.search(r'^-([a-z]{2,12})\s(.*)', query)
119 if args is not None:
120 lang = args.group(1)
121 query = args.group(2)
122
123 if not query:
124 bot.reply('What do you want me to look up?')
125 return NOLIMIT
126 server = lang + '.wikipedia.org'
127 query = mw_search(server, query, 1)
128 if not query:
129 bot.reply("I can't find any results for that.")
130 return NOLIMIT
131 else:
132 query = query[0]
133 say_snippet(bot, server, query)
134
[end of sopel/modules/wikipedia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py
--- a/sopel/modules/wikipedia.py
+++ b/sopel/modules/wikipedia.py
@@ -61,7 +61,12 @@
def say_snippet(bot, server, query, show_url=True):
page_name = query.replace('_', ' ')
query = query.replace(' ', '_')
- snippet = mw_snippet(server, query)
+ try:
+ snippet = mw_snippet(server, query)
+ except KeyError:
+ if show_url:
+ bot.say("[WIKIPEDIA] Error fetching snippet for \"{}\".".format(page_name))
+ return
msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
if show_url:
msg = msg + ' | https://{}/wiki/{}'.format(server, query)
| {"golden_diff": "diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py\n--- a/sopel/modules/wikipedia.py\n+++ b/sopel/modules/wikipedia.py\n@@ -61,7 +61,12 @@\n def say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = query.replace(' ', '_')\n- snippet = mw_snippet(server, query)\n+ try:\n+ snippet = mw_snippet(server, query)\n+ except KeyError:\n+ if show_url:\n+ bot.say(\"[WIKIPEDIA] Error fetching snippet for \\\"{}\\\".\".format(page_name))\n+ return\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n", "issue": "[wikipedia] Error on nonexistent article link\nI productively (? :laughing:) found another issue in the `wikipedia` module while testing my fix for #1255. It turns out that, if someone sends a link to a Wikipedia article that doesn't exist, the bot spits out a nice `KeyError`. Better to spit out a clean error (or, perhaps, nothing).\r\n\r\nAs with #1255, I'm testing a fix for this on my own Sopel instance. Once I'm happy with it (and the wording of whatever error message I add), there will be a PR for feedback before merging.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2013 Elsie Powell - embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ValidatedAttribute\nfrom sopel.module import NOLIMIT, commands, example, rule\nimport json\nimport re\n\nimport sys\nif sys.version_info.major < 3:\n from urlparse import unquote as _unquote\n unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\nelse:\n from urllib.parse import unquote\n\nREDIRECT = re.compile(r'^REDIRECT (.*)')\n\n\nclass WikipediaSection(StaticSection):\n default_lang = ValidatedAttribute('default_lang', default='en')\n \"\"\"The default language to find articles from.\"\"\"\n lang_per_channel = ValidatedAttribute('lang_per_channel')\n\n\ndef setup(bot):\n bot.config.define_section('wikipedia', WikipediaSection)\n\n regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n bot.memory['url_callbacks'][regex] = mw_info\n\n\ndef configure(config):\n config.define_section('wikipedia', WikipediaSection)\n config.wikipedia.configure_setting(\n 'default_lang',\n \"Enter the default language to find articles from.\"\n )\n\n\ndef mw_search(server, query, num):\n \"\"\"\n Searches the specified MediaWiki server for the given query, and returns\n the specified number of results.\n \"\"\"\n search_url = ('http://%s/w/api.php?format=json&action=query'\n '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'\n '&srsearch=') % (server, num)\n search_url += query\n query = json.loads(web.get(search_url))\n if 'query' in query:\n query = query['query']['search']\n return [r['title'] for r in query]\n else:\n return None\n\n\ndef say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = query.replace(' ', '_')\n snippet = mw_snippet(server, query)\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n bot.say(msg)\n\n\ndef mw_snippet(server, query):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n snippet_url = ('https://' + 
server + '/w/api.php?format=json'\n '&action=query&prop=extracts&exintro&explaintext'\n '&exchars=300&redirects&titles=')\n snippet_url += query\n snippet = json.loads(web.get(snippet_url))\n snippet = snippet['query']['pages']\n\n # For some reason, the API gives the page *number* as the key, so we just\n # grab the first page number in the results.\n snippet = snippet[list(snippet.keys())[0]]\n\n return snippet['extract']\n\n\n@rule('.*\\/([a-z]+\\.wikipedia.org)\\/wiki\\/((?!File\\:)[^ ]+).*')\ndef mw_info(bot, trigger, found_match=None):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n match = found_match or trigger\n say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)\n\n\n@commands('w', 'wiki', 'wik')\n@example('.w San Francisco')\ndef wikipedia(bot, trigger):\n lang = bot.config.wikipedia.default_lang\n\n #change lang if channel has custom language set\n if (trigger.sender and not trigger.sender.is_nick() and\n bot.config.wikipedia.lang_per_channel):\n customlang = re.search('(' + trigger.sender + '):(\\w+)',\n bot.config.wikipedia.lang_per_channel)\n if customlang is not None:\n lang = customlang.group(2)\n\n if trigger.group(2) is None:\n bot.reply(\"What do you want me to look up?\")\n return NOLIMIT\n\n query = trigger.group(2)\n args = re.search(r'^-([a-z]{2,12})\\s(.*)', query)\n if args is not None:\n lang = args.group(1)\n query = args.group(2)\n\n if not query:\n bot.reply('What do you want me to look up?')\n return NOLIMIT\n server = lang + '.wikipedia.org'\n query = mw_search(server, query, 1)\n if not query:\n bot.reply(\"I can't find any results for that.\")\n return NOLIMIT\n else:\n query = query[0]\n say_snippet(bot, server, query)\n", "path": "sopel/modules/wikipedia.py"}]} | 2,068 | 191 |
gh_patches_debug_39425 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-3682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] "Setting ds_accelerator" uses `print` not `logger`
**Describe the bug**
When `accelerate` tries to run any command while `DeepSpeed` is available, we get `"Setting ds_accelerator ..."`, which, while not explicitly bad, is making a test fail due to a non-clean CLI print when non-deepspeed code is used (we have a check for `is_deepspeed_available()` which is probably triggering this).
As a nice QOL, it would be good to have `get_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L102) and `set_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L109) use the already-existing logger in the framework if possible, so that we can disable these annoying prints when they're not needed :)
**To Reproduce**
Steps to reproduce the behavior:
1. `pip install accelerate deepspeed -U`
2. Create a file with:
```python
from accelerate.commands.tpu import tpu_command_parser, tpu_command_launcher
parser = tpu_command_parser()
args = parser.parse_args([
"--config_file", "tests/test_configs/latest.yaml",
"--install_accelerate",
"--debug"
])
tpu_command_launcher(args)
```
3. Run `python {my_file_name.py}`
4. Should print:
```bash
Setting ds_accelerator to cuda (auto detect)
Running gcloud compute tpus tpu-vm ssh test-tpu --zone us-central1-a --command cd /usr/share; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all
```
**Expected behavior**
A configurable option to silence these print statements by having them run through the logging system instead, perhaps as an `info` or as a `debug`.
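For illustration (the helper name below is made up, and the import is guarded because `deepspeed.utils` depends on torch and may not be importable at accelerator-detection time), something along these lines would be enough:

```python
try:
    from deepspeed.utils import logger as accel_logger
except ImportError:
    accel_logger = None


def _report_accelerator(name, method):
    # Route the message through the logging system so callers can silence it.
    if accel_logger is not None:
        accel_logger.info(f"Setting ds_accelerator to {name} ({method})")
```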
</issue>
<code>
[start of accelerator/real_accelerator.py]
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5 import os
6
7 try:
8 from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
9 except ImportError as e:
10 dsa1 = None
11 try:
12 from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2
13 except ImportError as e:
14 dsa2 = None
15
16 ds_accelerator = None
17
18
19 def _validate_accelerator(accel_obj):
20 # because abstract_accelerator has different path during
21 # build time (accelerator.abstract_accelerator)
22 # and run time (deepspeed.accelerator.abstract_accelerator)
23 # and extension would import the
24 # run time abstract_accelerator/DeepSpeedAccelerator as its base
25 # class, so we need to compare accel_obj with both base class.
26 # if accel_obj is instance of DeepSpeedAccelerator in one of
27 # accelerator.abstractor_accelerator
28 # or deepspeed.accelerator.abstract_accelerator, consider accel_obj
29 # is a conforming object
30 if not ((dsa1 != None and isinstance(accel_obj, dsa1)) or (dsa2 != None and isinstance(accel_obj, dsa2))):
31 raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator')
32
33 # TODO: turn off is_available test since this breaks tests
34 #assert accel_obj.is_available(), \
35 # f'{accel_obj.__class__.__name__} accelerator fails is_available() test'
36
37
38 def get_accelerator():
39 global ds_accelerator
40 if ds_accelerator is not None:
41 return ds_accelerator
42
43 accelerator_name = None
44 ds_set_method = None
45 # 1. Detect whether there is override of DeepSpeed accelerators from environment variable.
46 # DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'
47 if 'DS_ACCELERATOR' in os.environ.keys():
48 accelerator_name = os.environ['DS_ACCELERATOR']
49 if accelerator_name == 'xpu':
50 try:
51 from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401
52 except ImportError as e:
53 raise ValueError(
54 f'XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.')
55 elif accelerator_name == 'cpu':
56 try:
57 import intel_extension_for_pytorch # noqa: F401
58 except ImportError as e:
59 raise ValueError(
60 f'CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.')
61 elif accelerator_name == 'cuda':
62 pass
63 else:
64 raise ValueError(
65 f'DS_ACCELERATOR must be one of "cuda", "cpu", or "xpu". Value "{accelerator_name}" is not supported')
66 ds_set_method = 'override'
67
68 # 2. If no override, detect which accelerator to use automatically
69 if accelerator_name == None:
70 try:
71 from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811
72 accelerator_name = 'xpu'
73 except ImportError as e:
74 # We need a way to choose between CUDA_Accelerator and CPU_Accelerator
75 # Currently we detect whether intel_extension_for_pytorch is installed
76 # in the environment and use CPU_Accelerator if the answer is True.
77 # An alternative might be detect whether CUDA device is installed on
78 # the system but this comes with two pitfalls:
79 # 1. the system may not have torch pre-installed, so
80 # get_accelerator().is_available() may not work.
81 # 2. Some scenario like install on login node (without CUDA device)
82 # and run on compute node (with CUDA device) may cause mismatch
83 # between installation time and runtime.
84 try:
85 import intel_extension_for_pytorch # noqa: F401,F811
86 accelerator_name = 'cpu'
87 except ImportError as e:
88 accelerator_name = 'cuda'
89 ds_set_method = 'auto detect'
90
91 # 3. Set ds_accelerator accordingly
92 if accelerator_name == 'cuda':
93 from .cuda_accelerator import CUDA_Accelerator
94 ds_accelerator = CUDA_Accelerator()
95 elif accelerator_name == 'cpu':
96 from .cpu_accelerator import CPU_Accelerator
97 ds_accelerator = CPU_Accelerator()
98 elif accelerator_name == 'xpu':
99 # XPU_Accelerator is already imported in detection stage
100 ds_accelerator = XPU_Accelerator()
101 _validate_accelerator(ds_accelerator)
102 print(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
103 return ds_accelerator
104
105
106 def set_accelerator(accel_obj):
107 global ds_accelerator
108 _validate_accelerator(accel_obj)
109 print(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
110 ds_accelerator = accel_obj
111
112
113 '''
114 -----------[code] test_get.py -----------
115 from deepspeed.accelerator import get_accelerator
116 my_accelerator = get_accelerator()
117 print(f'{my_accelerator._name=}')
118 print(f'{my_accelerator._communication_backend=}')
119 print(f'{my_accelerator.HalfTensor().device=}')
120 print(f'{my_accelerator.total_memory()=}')
121 -----------[code] test_get.py -----------
122
123 ---[output] python test_get.py---------
124 my_accelerator.name()='cuda'
125 my_accelerator.communication_backend='nccl'
126 my_accelerator.HalfTensor().device=device(type='cuda', index=0)
127 my_accelerator.total_memory()=34089730048
128 ---[output] python test_get.py---------
129
130 **************************************************************************
131 -----------[code] test_set.py -----------
132 from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
133 cu_accel = CUDA_Accelerator()
134 print(f'{id(cu_accel)=}')
135 from deepspeed.accelerator import set_accelerator, get_accelerator
136 set_accelerator(cu_accel)
137
138 my_accelerator = get_accelerator()
139 print(f'{id(my_accelerator)=}')
140 print(f'{my_accelerator._name=}')
141 print(f'{my_accelerator._communication_backend=}')
142 print(f'{my_accelerator.HalfTensor().device=}')
143 print(f'{my_accelerator.total_memory()=}')
144 -----------[code] test_set.py -----------
145
146
147 ---[output] python test_set.py---------
148 id(cu_accel)=139648165478304
149 my_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>
150 my_accelerator.name='cuda'
151 my_accelerator.communication_backend='nccl'
152 my_accelerator.HalfTensor().device=device(type='cuda', index=0)
153 my_accelerator.total_memory()=34089730048
154 ---[output] python test_set.py---------
155 '''
156
[end of accelerator/real_accelerator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/accelerator/real_accelerator.py b/accelerator/real_accelerator.py
--- a/accelerator/real_accelerator.py
+++ b/accelerator/real_accelerator.py
@@ -4,6 +4,13 @@
# DeepSpeed Team
import os
+try:
+ # Importing logger currently requires that torch is installed, hence the try...except
+ # TODO: Remove logger dependency on torch.
+ from deepspeed.utils import logger as accel_logger
+except ImportError as e:
+ accel_logger = None
+
try:
from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
except ImportError as e:
@@ -99,14 +106,16 @@
# XPU_Accelerator is already imported in detection stage
ds_accelerator = XPU_Accelerator()
_validate_accelerator(ds_accelerator)
- print(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
+ if accel_logger is not None:
+ accel_logger.info(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
return ds_accelerator
def set_accelerator(accel_obj):
global ds_accelerator
_validate_accelerator(accel_obj)
- print(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
+ if accel_logger is not None:
+ accel_logger.info(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
ds_accelerator = accel_obj
@@ -114,10 +123,10 @@
-----------[code] test_get.py -----------
from deepspeed.accelerator import get_accelerator
my_accelerator = get_accelerator()
-print(f'{my_accelerator._name=}')
-print(f'{my_accelerator._communication_backend=}')
-print(f'{my_accelerator.HalfTensor().device=}')
-print(f'{my_accelerator.total_memory()=}')
+logger.info(f'{my_accelerator._name=}')
+logger.info(f'{my_accelerator._communication_backend=}')
+logger.info(f'{my_accelerator.HalfTensor().device=}')
+logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_get.py -----------
---[output] python test_get.py---------
@@ -131,16 +140,16 @@
-----------[code] test_set.py -----------
from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
cu_accel = CUDA_Accelerator()
-print(f'{id(cu_accel)=}')
+logger.info(f'{id(cu_accel)=}')
from deepspeed.accelerator import set_accelerator, get_accelerator
set_accelerator(cu_accel)
my_accelerator = get_accelerator()
-print(f'{id(my_accelerator)=}')
-print(f'{my_accelerator._name=}')
-print(f'{my_accelerator._communication_backend=}')
-print(f'{my_accelerator.HalfTensor().device=}')
-print(f'{my_accelerator.total_memory()=}')
+logger.info(f'{id(my_accelerator)=}')
+logger.info(f'{my_accelerator._name=}')
+logger.info(f'{my_accelerator._communication_backend=}')
+logger.info(f'{my_accelerator.HalfTensor().device=}')
+logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_set.py -----------
| {"golden_diff": "diff --git a/accelerator/real_accelerator.py b/accelerator/real_accelerator.py\n--- a/accelerator/real_accelerator.py\n+++ b/accelerator/real_accelerator.py\n@@ -4,6 +4,13 @@\n # DeepSpeed Team\n import os\n \n+try:\n+ # Importing logger currently requires that torch is installed, hence the try...except\n+ # TODO: Remove logger dependency on torch.\n+ from deepspeed.utils import logger as accel_logger\n+except ImportError as e:\n+ accel_logger = None\n+\n try:\n from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1\n except ImportError as e:\n@@ -99,14 +106,16 @@\n # XPU_Accelerator is already imported in detection stage\n ds_accelerator = XPU_Accelerator()\n _validate_accelerator(ds_accelerator)\n- print(f\"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})\")\n+ if accel_logger is not None:\n+ accel_logger.info(f\"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})\")\n return ds_accelerator\n \n \n def set_accelerator(accel_obj):\n global ds_accelerator\n _validate_accelerator(accel_obj)\n- print(f\"Setting ds_accelerator to {accel_obj._name} (model specified)\")\n+ if accel_logger is not None:\n+ accel_logger.info(f\"Setting ds_accelerator to {accel_obj._name} (model specified)\")\n ds_accelerator = accel_obj\n \n \n@@ -114,10 +123,10 @@\n -----------[code] test_get.py -----------\n from deepspeed.accelerator import get_accelerator\n my_accelerator = get_accelerator()\n-print(f'{my_accelerator._name=}')\n-print(f'{my_accelerator._communication_backend=}')\n-print(f'{my_accelerator.HalfTensor().device=}')\n-print(f'{my_accelerator.total_memory()=}')\n+logger.info(f'{my_accelerator._name=}')\n+logger.info(f'{my_accelerator._communication_backend=}')\n+logger.info(f'{my_accelerator.HalfTensor().device=}')\n+logger.info(f'{my_accelerator.total_memory()=}')\n -----------[code] test_get.py -----------\n \n ---[output] python test_get.py---------\n@@ -131,16 +140,16 @@\n -----------[code] test_set.py -----------\n from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator\n cu_accel = CUDA_Accelerator()\n-print(f'{id(cu_accel)=}')\n+logger.info(f'{id(cu_accel)=}')\n from deepspeed.accelerator import set_accelerator, get_accelerator\n set_accelerator(cu_accel)\n \n my_accelerator = get_accelerator()\n-print(f'{id(my_accelerator)=}')\n-print(f'{my_accelerator._name=}')\n-print(f'{my_accelerator._communication_backend=}')\n-print(f'{my_accelerator.HalfTensor().device=}')\n-print(f'{my_accelerator.total_memory()=}')\n+logger.info(f'{id(my_accelerator)=}')\n+logger.info(f'{my_accelerator._name=}')\n+logger.info(f'{my_accelerator._communication_backend=}')\n+logger.info(f'{my_accelerator.HalfTensor().device=}')\n+logger.info(f'{my_accelerator.total_memory()=}')\n -----------[code] test_set.py -----------\n", "issue": "[BUG] \"Setting ds_accelerator\" uses `print` not `logger`\n**Describe the bug**\r\nWhen `accelerate` tries to run any command while `DeepSpeed` is available, we get `\"Setting ds_accelerator ...\"`, which while not explicitly bad, is making a test fail due to a non-clean CLI print when non-deepspeed code is used (we have a check for `is_deepspeed_available()` which is probably triggering this.\r\n\r\nAs a nice QOL, it would be good to have `get_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L102) and `set_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L109) use the already-existing logger in the framework if 
possible, so that we can disable these annoying prints when they're not needed :)\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. `pip install accelerate deepspeed -U`\r\n2. Create a file with:\r\n```python\r\nfrom accelerate.commands.tpu import tpu_command_parser, tpu_command_launcher\r\n\r\nparser = tpu_command_parser()\r\nargs = parser.parse_args([\r\n \"--config_file\", \"tests/test_configs/latest.yaml\",\r\n \"--install_accelerate\",\r\n \"--debug\"\r\n])\r\n\r\ntpu_command_launcher(args)\r\n```\r\n3. Run `python {my_file_name.py}`\r\n4. Should print:\r\n```bash\r\nSetting ds_accelerator to cuda (auto detect)\r\nRunning gcloud compute tpus tpu-vm ssh test-tpu --zone us-central1-a --command cd /usr/share; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all\r\n```\r\n\r\n**Expected behavior**\r\n\r\nA configurable option to silence these print statements by having them run through the logging system instead, perhaps as an `info` or as a `debug`. \n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\nimport os\n\ntry:\n from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1\nexcept ImportError as e:\n dsa1 = None\ntry:\n from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2\nexcept ImportError as e:\n dsa2 = None\n\nds_accelerator = None\n\n\ndef _validate_accelerator(accel_obj):\n # because abstract_accelerator has different path during\n # build time (accelerator.abstract_accelerator)\n # and run time (deepspeed.accelerator.abstract_accelerator)\n # and extension would import the\n # run time abstract_accelerator/DeepSpeedAccelerator as its base\n # class, so we need to compare accel_obj with both base class.\n # if accel_obj is instance of DeepSpeedAccelerator in one of\n # accelerator.abstractor_accelerator\n # or deepspeed.accelerator.abstract_accelerator, consider accel_obj\n # is a conforming object\n if not ((dsa1 != None and isinstance(accel_obj, dsa1)) or (dsa2 != None and isinstance(accel_obj, dsa2))):\n raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator')\n\n # TODO: turn off is_available test since this breaks tests\n #assert accel_obj.is_available(), \\\n # f'{accel_obj.__class__.__name__} accelerator fails is_available() test'\n\n\ndef get_accelerator():\n global ds_accelerator\n if ds_accelerator is not None:\n return ds_accelerator\n\n accelerator_name = None\n ds_set_method = None\n # 1. Detect whether there is override of DeepSpeed accelerators from environment variable.\n # DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'\n if 'DS_ACCELERATOR' in os.environ.keys():\n accelerator_name = os.environ['DS_ACCELERATOR']\n if accelerator_name == 'xpu':\n try:\n from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401\n except ImportError as e:\n raise ValueError(\n f'XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.')\n elif accelerator_name == 'cpu':\n try:\n import intel_extension_for_pytorch # noqa: F401\n except ImportError as e:\n raise ValueError(\n f'CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.')\n elif accelerator_name == 'cuda':\n pass\n else:\n raise ValueError(\n f'DS_ACCELERATOR must be one of \"cuda\", \"cpu\", or \"xpu\". Value \"{accelerator_name}\" is not supported')\n ds_set_method = 'override'\n\n # 2. 
If no override, detect which accelerator to use automatically\n if accelerator_name == None:\n try:\n from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811\n accelerator_name = 'xpu'\n except ImportError as e:\n # We need a way to choose between CUDA_Accelerator and CPU_Accelerator\n # Currently we detect whether intel_extension_for_pytorch is installed\n # in the environment and use CPU_Accelerator if the answer is True.\n # An alternative might be detect whether CUDA device is installed on\n # the system but this comes with two pitfalls:\n # 1. the system may not have torch pre-installed, so\n # get_accelerator().is_available() may not work.\n # 2. Some scenario like install on login node (without CUDA device)\n # and run on compute node (with CUDA device) may cause mismatch\n # between installation time and runtime.\n try:\n import intel_extension_for_pytorch # noqa: F401,F811\n accelerator_name = 'cpu'\n except ImportError as e:\n accelerator_name = 'cuda'\n ds_set_method = 'auto detect'\n\n # 3. Set ds_accelerator accordingly\n if accelerator_name == 'cuda':\n from .cuda_accelerator import CUDA_Accelerator\n ds_accelerator = CUDA_Accelerator()\n elif accelerator_name == 'cpu':\n from .cpu_accelerator import CPU_Accelerator\n ds_accelerator = CPU_Accelerator()\n elif accelerator_name == 'xpu':\n # XPU_Accelerator is already imported in detection stage\n ds_accelerator = XPU_Accelerator()\n _validate_accelerator(ds_accelerator)\n print(f\"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})\")\n return ds_accelerator\n\n\ndef set_accelerator(accel_obj):\n global ds_accelerator\n _validate_accelerator(accel_obj)\n print(f\"Setting ds_accelerator to {accel_obj._name} (model specified)\")\n ds_accelerator = accel_obj\n\n\n'''\n-----------[code] test_get.py -----------\nfrom deepspeed.accelerator import get_accelerator\nmy_accelerator = get_accelerator()\nprint(f'{my_accelerator._name=}')\nprint(f'{my_accelerator._communication_backend=}')\nprint(f'{my_accelerator.HalfTensor().device=}')\nprint(f'{my_accelerator.total_memory()=}')\n-----------[code] test_get.py -----------\n\n---[output] python test_get.py---------\nmy_accelerator.name()='cuda'\nmy_accelerator.communication_backend='nccl'\nmy_accelerator.HalfTensor().device=device(type='cuda', index=0)\nmy_accelerator.total_memory()=34089730048\n---[output] python test_get.py---------\n\n**************************************************************************\n-----------[code] test_set.py -----------\nfrom deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator\ncu_accel = CUDA_Accelerator()\nprint(f'{id(cu_accel)=}')\nfrom deepspeed.accelerator import set_accelerator, get_accelerator\nset_accelerator(cu_accel)\n\nmy_accelerator = get_accelerator()\nprint(f'{id(my_accelerator)=}')\nprint(f'{my_accelerator._name=}')\nprint(f'{my_accelerator._communication_backend=}')\nprint(f'{my_accelerator.HalfTensor().device=}')\nprint(f'{my_accelerator.total_memory()=}')\n-----------[code] test_set.py -----------\n\n\n---[output] python test_set.py---------\nid(cu_accel)=139648165478304\nmy_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>\nmy_accelerator.name='cuda'\nmy_accelerator.communication_backend='nccl'\nmy_accelerator.HalfTensor().device=device(type='cuda', index=0)\nmy_accelerator.total_memory()=34089730048\n---[output] python test_set.py---------\n'''\n", "path": "accelerator/real_accelerator.py"}]} | 2,827 | 729 |
gh_patches_debug_28287 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3904 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Speed up tip migration script
Reticketed from https://github.com/gratipay/inside.gratipay.com/issues/468#issuecomment-171707621
When we wrote this script, we thought that we'd only be using it for a month or two. Now that it's here to stay - time to work on it a bit :)
The script takes so long because we're pulling in _all_ approved teams. We could place a check in the script so that we only pull in those teams that satisfy the following criteria -
1) Team owner must have 0+ tips from Gratipay 1.0
2) Team must have zero `payment_instructions` that have a `ctime` less than the team's `ctime` (i.e. migrated tips). We use the `ctime` attribute to differentiate `payment_instructions` that were created as a result of migrated tips vs. those created in Gratipay 2.0
https://github.com/gratipay/gratipay.com/blob/master/bin/migrate-tips.py#L6-L10
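For illustration, the narrowed selection could look something like the sketch below (reading "0+" as "at least one"; the `tips`/`tippee` names for the 1.0 table are an assumption, so adjust to the real schema):

```python
def teams_needing_migration(db):
    """Only approved teams whose owner received 1.0 tips and whose tips
    haven't already been migrated into payment_instructions."""
    return db.all("""
        SELECT t.*::teams
          FROM teams t
         WHERE t.is_approved IS true
           AND EXISTS (SELECT 1
                         FROM tips
                        WHERE tippee = t.owner)           -- criterion 1
           AND NOT EXISTS (SELECT 1
                             FROM payment_instructions pi
                            WHERE pi.team = t.slug
                              AND pi.ctime < t.ctime)     -- criterion 2
    """)
```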
</issue>
<code>
[start of gratipay/models/team.py]
1 """Teams on Gratipay receive payments and distribute payroll.
2 """
3 import requests
4 from aspen import json, log
5 from gratipay.models import add_event
6 from postgres.orm import Model
7
8
9 class Team(Model):
10 """Represent a Gratipay team.
11 """
12
13 typname = 'teams'
14
15 def __eq__(self, other):
16 if not isinstance(other, Team):
17 return False
18 return self.id == other.id
19
20 def __ne__(self, other):
21 if not isinstance(other, Team):
22 return True
23 return self.id != other.id
24
25
26 # Constructors
27 # ============
28
29 @classmethod
30 def from_id(cls, id):
31 """Return an existing team based on id.
32 """
33 return cls._from_thing("id", id)
34
35 @classmethod
36 def from_slug(cls, slug):
37 """Return an existing team based on slug.
38 """
39 return cls._from_thing("slug_lower", slug.lower())
40
41 @classmethod
42 def _from_thing(cls, thing, value):
43 assert thing in ("id", "slug_lower")
44 return cls.db.one("""
45
46 SELECT teams.*::teams
47 FROM teams
48 WHERE {}=%s
49
50 """.format(thing), (value,))
51
52 @classmethod
53 def insert(cls, owner, **fields):
54 fields['slug_lower'] = fields['slug'].lower()
55 fields['owner'] = owner.username
56 return cls.db.one("""
57
58 INSERT INTO teams
59 (slug, slug_lower, name, homepage,
60 product_or_service, todo_url, onboarding_url,
61 owner)
62 VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,
63 %(product_or_service)s, %(todo_url)s, %(onboarding_url)s,
64 %(owner)s)
65 RETURNING teams.*::teams
66
67 """, fields)
68
69
70 def create_github_review_issue(self):
71 """POST to GitHub, and return the URL of the new issue.
72 """
73 api_url = "https://api.github.com/repos/{}/issues".format(self.review_repo)
74 data = json.dumps({ "title": self.name
75 , "body": "https://gratipay.com/{}/\n\n".format(self.slug) +
76 "(This application will remain open for at least a week.)"
77 })
78 out = ''
79 try:
80 r = requests.post(api_url, auth=self.review_auth, data=data)
81 if r.status_code == 201:
82 out = r.json()['html_url']
83 else:
84 log(r.status_code)
85 log(r.text)
86 err = str(r.status_code)
87 except:
88 err = "eep"
89 if not out:
90 out = "https://github.com/gratipay/team-review/issues#error-{}".format(err)
91 return out
92
93
94 def set_review_url(self, review_url):
95 self.db.run("UPDATE teams SET review_url=%s WHERE id=%s", (review_url, self.id))
96 self.set_attributes(review_url=review_url)
97
98
99 def get_og_title(self):
100 out = self.name
101 receiving = self.receiving
102 if receiving > 0:
103 out += " receives $%.2f/wk" % receiving
104 else:
105 out += " is"
106 return out + " on Gratipay"
107
108
109 def update_receiving(self, cursor=None):
110 r = (cursor or self.db).one("""
111 WITH our_receiving AS (
112 SELECT amount
113 FROM current_payment_instructions
114 JOIN participants p ON p.username = participant
115 WHERE team = %(slug)s
116 AND p.is_suspicious IS NOT true
117 AND amount > 0
118 AND is_funded
119 )
120 UPDATE teams t
121 SET receiving = COALESCE((SELECT sum(amount) FROM our_receiving), 0)
122 , nreceiving_from = COALESCE((SELECT count(*) FROM our_receiving), 0)
123 , distributing = COALESCE((SELECT sum(amount) FROM our_receiving), 0)
124 , ndistributing_to = 1
125 WHERE t.slug = %(slug)s
126 RETURNING receiving, nreceiving_from, distributing, ndistributing_to
127 """, dict(slug=self.slug))
128
129
130 # This next step is easy for now since we don't have payroll.
131 from gratipay.models.participant import Participant
132 Participant.from_username(self.owner).update_taking(cursor or self.db)
133
134 self.set_attributes( receiving=r.receiving
135 , nreceiving_from=r.nreceiving_from
136 , distributing=r.distributing
137 , ndistributing_to=r.ndistributing_to
138 )
139
140 @property
141 def status(self):
142 return { None: 'unreviewed'
143 , False: 'rejected'
144 , True: 'approved'
145 }[self.is_approved]
146
147 def to_dict(self):
148 return {
149 'homepage': self.homepage,
150 'name': self.name,
151 'nreceiving_from': self.nreceiving_from,
152 'onboarding_url': self.onboarding_url,
153 'owner': '~' + self.owner,
154 'receiving': self.receiving,
155 'slug': self.slug,
156 'status': self.status,
157 'todo_url': self.todo_url
158 }
159
160 def migrate_tips(self):
161 payment_instructions = self.db.all("""
162 SELECT pi.*
163 FROM payment_instructions pi
164 JOIN teams t ON t.slug = pi.team
165 JOIN participants p ON t.owner = p.username
166 WHERE p.username = %s
167 AND pi.ctime < t.ctime
168 """, (self.owner, ))
169
170 # Make sure the migration hasn't been done already
171 if payment_instructions:
172 raise AlreadyMigrated
173
174 return self.db.one("""
175 WITH rows AS (
176
177 INSERT INTO payment_instructions
178 (ctime, mtime, participant, team, amount, is_funded)
179 SELECT ct.ctime
180 , ct.mtime
181 , ct.tipper
182 , %(slug)s
183 , ct.amount
184 , ct.is_funded
185 FROM current_tips ct
186 JOIN participants p ON p.username = tipper
187 WHERE ct.tippee=%(owner)s
188 AND p.claimed_time IS NOT NULL
189 AND p.is_suspicious IS NOT TRUE
190 AND p.is_closed IS NOT TRUE
191 RETURNING 1
192
193 ) SELECT count(*) FROM rows;
194 """, {'slug': self.slug, 'owner': self.owner})
195
196
197 # Images
198 # ======
199
200 IMAGE_SIZES = ('original', 'large', 'small')
201
202 def get_image_url(self, size):
203 assert size in ('original', 'large', 'small'), size
204 return '/{}/image?size={}'.format(self.slug, size)
205
206 def save_image(self, original, large, small, image_type):
207 with self.db.get_cursor() as c:
208 oids = {}
209 for size in self.IMAGE_SIZES:
210 lobject = c.connection.lobject(getattr(self, 'image_oid_'+size), mode='wb')
211 lobject.write(locals()[size])
212 oids[size] = lobject.oid
213 lobject.close()
214
215 c.run("""UPDATE teams
216 SET image_oid_original=%s, image_oid_large=%s, image_oid_small=%s
217 , image_type=%s
218 WHERE id=%s"""
219 , (oids['original'], oids['large'], oids['small'], image_type, self.id)
220 )
221 add_event(c, 'team', dict( action='upsert_image'
222 , id=self.id
223 , **oids
224 ))
225 self.set_attributes( image_type=image_type
226 , **{'image_oid_'+size: oids[size] for size in oids}
227 )
228 return oids
229
230 def load_image(self, size):
231 assert size in self.IMAGE_SIZES, size
232 image = None
233 oid = getattr(self, 'image_oid_{}'.format(size))
234 if oid != 0:
235 with self.db.get_connection() as c:
236 image = c.lobject(oid, mode='rb').read()
237 return image
238
239
240 class AlreadyMigrated(Exception): pass
241
[end of gratipay/models/team.py]
[start of bin/migrate-tips.py]
1 from gratipay.wireup import db, env
2 from gratipay.models.team import Team, AlreadyMigrated
3
4 db = db(env())
5
6 slugs = db.all("""
7 SELECT slug
8 FROM teams
9 WHERE is_approved IS TRUE
10 """)
11
12 for slug in slugs:
13 team = Team.from_slug(slug)
14 try:
15 ntips = team.migrate_tips()
16 print("Migrated {} tip(s) for '{}'".format(ntips, slug))
17 except AlreadyMigrated:
18 print("'%s' already migrated." % slug)
19
20 print("Done.")
21
[end of bin/migrate-tips.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/migrate-tips.py b/bin/migrate-tips.py
--- a/bin/migrate-tips.py
+++ b/bin/migrate-tips.py
@@ -1,20 +1,27 @@
from gratipay.wireup import db, env
-from gratipay.models.team import Team, AlreadyMigrated
+from gratipay.models.team import AlreadyMigrated
db = db(env())
-slugs = db.all("""
- SELECT slug
- FROM teams
- WHERE is_approved IS TRUE
+teams = db.all("""
+ SELECT distinct ON (t.slug) t.*::teams
+ FROM teams t
+ JOIN tips ON t.owner = tips.tippee -- Only fetch teams whose owners had tips under Gratipay 1.0
+ WHERE t.is_approved IS TRUE -- Only fetch approved teams
+ AND NOT EXISTS ( -- Make sure tips haven't been migrated for any teams with same owner
+ SELECT 1
+ FROM payment_instructions pi
+ JOIN teams t2 ON t2.slug = pi.team
+ WHERE t2.owner = t.owner
+ AND pi.ctime < t2.ctime
+ )
""")
-for slug in slugs:
- team = Team.from_slug(slug)
+for team in teams:
try:
ntips = team.migrate_tips()
- print("Migrated {} tip(s) for '{}'".format(ntips, slug))
+ print("Migrated {} tip(s) for '{}'".format(ntips, team.slug))
except AlreadyMigrated:
- print("'%s' already migrated." % slug)
+ print("'%s' already migrated." % team.slug)
print("Done.")
diff --git a/gratipay/models/team.py b/gratipay/models/team.py
--- a/gratipay/models/team.py
+++ b/gratipay/models/team.py
@@ -162,8 +162,7 @@
SELECT pi.*
FROM payment_instructions pi
JOIN teams t ON t.slug = pi.team
- JOIN participants p ON t.owner = p.username
- WHERE p.username = %s
+ WHERE t.owner = %s
AND pi.ctime < t.ctime
""", (self.owner, ))
| {"golden_diff": "diff --git a/bin/migrate-tips.py b/bin/migrate-tips.py\n--- a/bin/migrate-tips.py\n+++ b/bin/migrate-tips.py\n@@ -1,20 +1,27 @@\n from gratipay.wireup import db, env\n-from gratipay.models.team import Team, AlreadyMigrated\n+from gratipay.models.team import AlreadyMigrated\n \n db = db(env())\n \n-slugs = db.all(\"\"\"\n- SELECT slug\n- FROM teams\n- WHERE is_approved IS TRUE\n+teams = db.all(\"\"\"\n+ SELECT distinct ON (t.slug) t.*::teams\n+ FROM teams t\n+ JOIN tips ON t.owner = tips.tippee -- Only fetch teams whose owners had tips under Gratipay 1.0\n+ WHERE t.is_approved IS TRUE -- Only fetch approved teams\n+ AND NOT EXISTS ( -- Make sure tips haven't been migrated for any teams with same owner\n+ SELECT 1\n+ FROM payment_instructions pi\n+ JOIN teams t2 ON t2.slug = pi.team\n+ WHERE t2.owner = t.owner\n+ AND pi.ctime < t2.ctime\n+ )\n \"\"\")\n \n-for slug in slugs:\n- team = Team.from_slug(slug)\n+for team in teams:\n try:\n ntips = team.migrate_tips()\n- print(\"Migrated {} tip(s) for '{}'\".format(ntips, slug))\n+ print(\"Migrated {} tip(s) for '{}'\".format(ntips, team.slug))\n except AlreadyMigrated:\n- print(\"'%s' already migrated.\" % slug)\n+ print(\"'%s' already migrated.\" % team.slug)\n \n print(\"Done.\")\ndiff --git a/gratipay/models/team.py b/gratipay/models/team.py\n--- a/gratipay/models/team.py\n+++ b/gratipay/models/team.py\n@@ -162,8 +162,7 @@\n SELECT pi.*\n FROM payment_instructions pi\n JOIN teams t ON t.slug = pi.team\n- JOIN participants p ON t.owner = p.username\n- WHERE p.username = %s\n+ WHERE t.owner = %s\n AND pi.ctime < t.ctime\n \"\"\", (self.owner, ))\n", "issue": "Speed up tip migration script\nReticketed from https://github.com/gratipay/inside.gratipay.com/issues/468#issuecomment-171707621\n\nWhen we wrote this script, we thought that we'd only be using it for a month or two. Now that it's here to stay - time to work on it a bit :) \n\nThe script takes so long because we're pulling in _all_ approved teams. We could place a check in the script so that we only pull in those teams that satisfy the following criteria - \n\n1) Team owner must have 0+ tips from Gratipay 1.0\n2) Team must have zero `payment_instructions` that have a `ctime` lesser than the team's `ctime` (i.e. migrated tips). 
We use the `ctime` attribute to differentiate `payment_instructions` that were created as a result of migrated tips vs created in Gratipay 2.0\n\nhttps://github.com/gratipay/gratipay.com/blob/master/bin/migrate-tips.py#L6-L10\n\n", "before_files": [{"content": "\"\"\"Teams on Gratipay receive payments and distribute payroll.\n\"\"\"\nimport requests\nfrom aspen import json, log\nfrom gratipay.models import add_event\nfrom postgres.orm import Model\n\n\nclass Team(Model):\n \"\"\"Represent a Gratipay team.\n \"\"\"\n\n typname = 'teams'\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Team):\n return True\n return self.id != other.id\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing team based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_slug(cls, slug):\n \"\"\"Return an existing team based on slug.\n \"\"\"\n return cls._from_thing(\"slug_lower\", slug.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"slug_lower\")\n return cls.db.one(\"\"\"\n\n SELECT teams.*::teams\n FROM teams\n WHERE {}=%s\n\n \"\"\".format(thing), (value,))\n\n @classmethod\n def insert(cls, owner, **fields):\n fields['slug_lower'] = fields['slug'].lower()\n fields['owner'] = owner.username\n return cls.db.one(\"\"\"\n\n INSERT INTO teams\n (slug, slug_lower, name, homepage,\n product_or_service, todo_url, onboarding_url,\n owner)\n VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,\n %(product_or_service)s, %(todo_url)s, %(onboarding_url)s,\n %(owner)s)\n RETURNING teams.*::teams\n\n \"\"\", fields)\n\n\n def create_github_review_issue(self):\n \"\"\"POST to GitHub, and return the URL of the new issue.\n \"\"\"\n api_url = \"https://api.github.com/repos/{}/issues\".format(self.review_repo)\n data = json.dumps({ \"title\": self.name\n , \"body\": \"https://gratipay.com/{}/\\n\\n\".format(self.slug) +\n \"(This application will remain open for at least a week.)\"\n })\n out = ''\n try:\n r = requests.post(api_url, auth=self.review_auth, data=data)\n if r.status_code == 201:\n out = r.json()['html_url']\n else:\n log(r.status_code)\n log(r.text)\n err = str(r.status_code)\n except:\n err = \"eep\"\n if not out:\n out = \"https://github.com/gratipay/team-review/issues#error-{}\".format(err)\n return out\n\n\n def set_review_url(self, review_url):\n self.db.run(\"UPDATE teams SET review_url=%s WHERE id=%s\", (review_url, self.id))\n self.set_attributes(review_url=review_url)\n\n\n def get_og_title(self):\n out = self.name\n receiving = self.receiving\n if receiving > 0:\n out += \" receives $%.2f/wk\" % receiving\n else:\n out += \" is\"\n return out + \" on Gratipay\"\n\n\n def update_receiving(self, cursor=None):\n r = (cursor or self.db).one(\"\"\"\n WITH our_receiving AS (\n SELECT amount\n FROM current_payment_instructions\n JOIN participants p ON p.username = participant\n WHERE team = %(slug)s\n AND p.is_suspicious IS NOT true\n AND amount > 0\n AND is_funded\n )\n UPDATE teams t\n SET receiving = COALESCE((SELECT sum(amount) FROM our_receiving), 0)\n , nreceiving_from = COALESCE((SELECT count(*) FROM our_receiving), 0)\n , distributing = COALESCE((SELECT sum(amount) FROM our_receiving), 0)\n , ndistributing_to = 1\n WHERE t.slug = %(slug)s\n RETURNING receiving, nreceiving_from, distributing, ndistributing_to\n \"\"\", dict(slug=self.slug))\n\n\n # This next 
step is easy for now since we don't have payroll.\n from gratipay.models.participant import Participant\n Participant.from_username(self.owner).update_taking(cursor or self.db)\n\n self.set_attributes( receiving=r.receiving\n , nreceiving_from=r.nreceiving_from\n , distributing=r.distributing\n , ndistributing_to=r.ndistributing_to\n )\n\n @property\n def status(self):\n return { None: 'unreviewed'\n , False: 'rejected'\n , True: 'approved'\n }[self.is_approved]\n\n def to_dict(self):\n return {\n 'homepage': self.homepage,\n 'name': self.name,\n 'nreceiving_from': self.nreceiving_from,\n 'onboarding_url': self.onboarding_url,\n 'owner': '~' + self.owner,\n 'receiving': self.receiving,\n 'slug': self.slug,\n 'status': self.status,\n 'todo_url': self.todo_url\n }\n\n def migrate_tips(self):\n payment_instructions = self.db.all(\"\"\"\n SELECT pi.*\n FROM payment_instructions pi\n JOIN teams t ON t.slug = pi.team\n JOIN participants p ON t.owner = p.username\n WHERE p.username = %s\n AND pi.ctime < t.ctime\n \"\"\", (self.owner, ))\n\n # Make sure the migration hasn't been done already\n if payment_instructions:\n raise AlreadyMigrated\n\n return self.db.one(\"\"\"\n WITH rows AS (\n\n INSERT INTO payment_instructions\n (ctime, mtime, participant, team, amount, is_funded)\n SELECT ct.ctime\n , ct.mtime\n , ct.tipper\n , %(slug)s\n , ct.amount\n , ct.is_funded\n FROM current_tips ct\n JOIN participants p ON p.username = tipper\n WHERE ct.tippee=%(owner)s\n AND p.claimed_time IS NOT NULL\n AND p.is_suspicious IS NOT TRUE\n AND p.is_closed IS NOT TRUE\n RETURNING 1\n\n ) SELECT count(*) FROM rows;\n \"\"\", {'slug': self.slug, 'owner': self.owner})\n\n\n # Images\n # ======\n\n IMAGE_SIZES = ('original', 'large', 'small')\n\n def get_image_url(self, size):\n assert size in ('original', 'large', 'small'), size\n return '/{}/image?size={}'.format(self.slug, size)\n\n def save_image(self, original, large, small, image_type):\n with self.db.get_cursor() as c:\n oids = {}\n for size in self.IMAGE_SIZES:\n lobject = c.connection.lobject(getattr(self, 'image_oid_'+size), mode='wb')\n lobject.write(locals()[size])\n oids[size] = lobject.oid\n lobject.close()\n\n c.run(\"\"\"UPDATE teams\n SET image_oid_original=%s, image_oid_large=%s, image_oid_small=%s\n , image_type=%s\n WHERE id=%s\"\"\"\n , (oids['original'], oids['large'], oids['small'], image_type, self.id)\n )\n add_event(c, 'team', dict( action='upsert_image'\n , id=self.id\n , **oids\n ))\n self.set_attributes( image_type=image_type\n , **{'image_oid_'+size: oids[size] for size in oids}\n )\n return oids\n\n def load_image(self, size):\n assert size in self.IMAGE_SIZES, size\n image = None\n oid = getattr(self, 'image_oid_{}'.format(size))\n if oid != 0:\n with self.db.get_connection() as c:\n image = c.lobject(oid, mode='rb').read()\n return image\n\n\nclass AlreadyMigrated(Exception): pass\n", "path": "gratipay/models/team.py"}, {"content": "from gratipay.wireup import db, env\nfrom gratipay.models.team import Team, AlreadyMigrated\n\ndb = db(env())\n\nslugs = db.all(\"\"\"\n SELECT slug\n FROM teams\n WHERE is_approved IS TRUE\n\"\"\")\n\nfor slug in slugs:\n team = Team.from_slug(slug)\n try:\n ntips = team.migrate_tips()\n print(\"Migrated {} tip(s) for '{}'\".format(ntips, slug))\n except AlreadyMigrated:\n print(\"'%s' already migrated.\" % slug)\n\nprint(\"Done.\")\n", "path": "bin/migrate-tips.py"}]} | 3,305 | 488 |
gh_patches_debug_28750 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow mkdocs.yaml config file (.yaml extension)
Even though `.yml` is commonly used, `.yaml` is actually preferred:
https://yaml.org/faq.html
> Is there an official extension for YAML files?
>
> Please use ".yaml" when possible.
It would therefore be great if you would allow `mkdocs.yaml`.
Somewhat related t #2164
Allow mkdocs.yaml when '--config' is not passed
We wanted to give our colleagues to possibility to build docs automatically when they put a `mkdocs.yml` in the docs folder of their repository. But it happened often, that they used `.yaml` instead the `.yml`.
When this PR is accepted, it will look first look for `mkdocs.yml` when `--config` is not provided. If this file is not present, it will try to find `mkdocs.yaml`.
When both are not present, it behaves like before.
When both are present, it behaves like before.
If only the `.yaml` version it present, this one will be used as the config file.
</issue>
<code>
[start of mkdocs/config/base.py]
1 import logging
2 import os
3 import sys
4 from yaml import YAMLError
5 from collections import UserDict
6 from contextlib import contextmanager
7
8 from mkdocs import exceptions
9 from mkdocs import utils
10
11
12 log = logging.getLogger('mkdocs.config')
13
14
15 class ValidationError(Exception):
16 """Raised during the validation process of the config on errors."""
17
18
19 class Config(UserDict):
20 """
21 MkDocs Configuration dict
22
23 This is a fairly simple extension of a standard dictionary. It adds methods
24 for running validation on the structure and contents.
25 """
26
27 def __init__(self, schema, config_file_path=None):
28 """
29 The schema is a Python dict which maps the config name to a validator.
30 """
31
32 self._schema = schema
33 self._schema_keys = set(dict(schema).keys())
34 # Ensure config_file_path is a Unicode string
35 if config_file_path is not None and not isinstance(config_file_path, str):
36 try:
37 # Assume config_file_path is encoded with the file system encoding.
38 config_file_path = config_file_path.decode(encoding=sys.getfilesystemencoding())
39 except UnicodeDecodeError:
40 raise ValidationError("config_file_path is not a Unicode string.")
41 self.config_file_path = config_file_path
42 self.data = {}
43
44 self.user_configs = []
45 self.set_defaults()
46
47 def set_defaults(self):
48 """
49 Set the base config by going through each validator and getting the
50 default if it has one.
51 """
52
53 for key, config_option in self._schema:
54 self[key] = config_option.default
55
56 def _validate(self):
57
58 failed, warnings = [], []
59
60 for key, config_option in self._schema:
61 try:
62 value = self.get(key)
63 self[key] = config_option.validate(value)
64 warnings.extend([(key, w) for w in config_option.warnings])
65 config_option.reset_warnings()
66 except ValidationError as e:
67 failed.append((key, e))
68
69 for key in (set(self.keys()) - self._schema_keys):
70 warnings.append((
71 key, f"Unrecognised configuration name: {key}"
72 ))
73
74 return failed, warnings
75
76 def _pre_validate(self):
77
78 failed, warnings = [], []
79
80 for key, config_option in self._schema:
81 try:
82 config_option.pre_validation(self, key_name=key)
83 warnings.extend([(key, w) for w in config_option.warnings])
84 config_option.reset_warnings()
85 except ValidationError as e:
86 failed.append((key, e))
87
88 return failed, warnings
89
90 def _post_validate(self):
91
92 failed, warnings = [], []
93
94 for key, config_option in self._schema:
95 try:
96 config_option.post_validation(self, key_name=key)
97 warnings.extend([(key, w) for w in config_option.warnings])
98 config_option.reset_warnings()
99 except ValidationError as e:
100 failed.append((key, e))
101
102 return failed, warnings
103
104 def validate(self):
105
106 failed, warnings = self._pre_validate()
107
108 run_failed, run_warnings = self._validate()
109
110 failed.extend(run_failed)
111 warnings.extend(run_warnings)
112
113 # Only run the post validation steps if there are no failures, warnings
114 # are okay.
115 if len(failed) == 0:
116 post_failed, post_warnings = self._post_validate()
117 failed.extend(post_failed)
118 warnings.extend(post_warnings)
119
120 return failed, warnings
121
122 def load_dict(self, patch):
123 """ Load config options from a dictionary. """
124
125 if not isinstance(patch, dict):
126 raise exceptions.ConfigurationError(
127 "The configuration is invalid. The expected type was a key "
128 "value mapping (a python dict) but we got an object of type: "
129 "{}".format(type(patch)))
130
131 self.user_configs.append(patch)
132 self.data.update(patch)
133
134 def load_file(self, config_file):
135 """ Load config options from the open file descriptor of a YAML file. """
136 try:
137 return self.load_dict(utils.yaml_load(config_file))
138 except YAMLError as e:
139 # MkDocs knows and understands ConfigurationErrors
140 raise exceptions.ConfigurationError(
141 f"MkDocs encountered an error parsing the configuration file: {e}"
142 )
143
144
145 @contextmanager
146 def _open_config_file(config_file):
147 """
148 A context manager which yields an open file descriptor ready to be read.
149
150 Accepts a filename as a string, an open or closed file descriptor, or None.
151 When None, it defaults to `mkdocs.yml` in the CWD. If a closed file descriptor
152 is received, a new file descriptor is opened for the same file.
153
154 The file descriptor is automaticaly closed when the context manager block is existed.
155 """
156
157 # Default to the standard config filename.
158 if config_file is None:
159 config_file = os.path.abspath('mkdocs.yml')
160
161 # If closed file descriptor, get file path to reopen later.
162 if hasattr(config_file, 'closed') and config_file.closed:
163 config_file = config_file.name
164
165 log.debug(f"Loading configuration file: {config_file}")
166
167 # If it is a string, we can assume it is a path and attempt to open it.
168 if isinstance(config_file, str):
169 if os.path.exists(config_file):
170 config_file = open(config_file, 'rb')
171 else:
172 raise exceptions.ConfigurationError(
173 f"Config file '{config_file}' does not exist.")
174
175 # Ensure file descriptor is at begining
176 config_file.seek(0)
177 try:
178 yield config_file
179 finally:
180 if hasattr(config_file, 'close'):
181 config_file.close()
182
183
184 def load_config(config_file=None, **kwargs):
185 """
186 Load the configuration for a given file object or name
187
188 The config_file can either be a file object, string or None. If it is None
189 the default `mkdocs.yml` filename will loaded.
190
191 Extra kwargs are passed to the configuration to replace any default values
192 unless they themselves are None.
193 """
194 options = kwargs.copy()
195
196 # Filter None values from the options. This usually happens with optional
197 # parameters from Click.
198 for key, value in options.copy().items():
199 if value is None:
200 options.pop(key)
201
202 with _open_config_file(config_file) as fd:
203 options['config_file_path'] = getattr(fd, 'name', '')
204
205 # Initialise the config with the default schema.
206 from mkdocs.config.defaults import get_schema
207 cfg = Config(schema=get_schema(), config_file_path=options['config_file_path'])
208 # load the config file
209 cfg.load_file(fd)
210
211 # Then load the options to overwrite anything in the config.
212 cfg.load_dict(options)
213
214 errors, warnings = cfg.validate()
215
216 for config_name, warning in warnings:
217 log.warning(f"Config value: '{config_name}'. Warning: {warning}")
218
219 for config_name, error in errors:
220 log.error(f"Config value: '{config_name}'. Error: {error}")
221
222 for key, value in cfg.items():
223 log.debug(f"Config value: '{key}' = {value!r}")
224
225 if len(errors) > 0:
226 raise exceptions.Abort(
227 "Aborted with {} Configuration Errors!".format(len(errors))
228 )
229 elif cfg['strict'] and len(warnings) > 0:
230 raise exceptions.Abort(
231 "Aborted with {} Configuration Warnings in 'strict' mode!".format(len(warnings))
232 )
233
234 return cfg
235
[end of mkdocs/config/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/config/base.py b/mkdocs/config/base.py
--- a/mkdocs/config/base.py
+++ b/mkdocs/config/base.py
@@ -156,24 +156,34 @@
# Default to the standard config filename.
if config_file is None:
- config_file = os.path.abspath('mkdocs.yml')
-
- # If closed file descriptor, get file path to reopen later.
- if hasattr(config_file, 'closed') and config_file.closed:
- config_file = config_file.name
-
- log.debug(f"Loading configuration file: {config_file}")
-
+ paths_to_try = ['mkdocs.yml', 'mkdocs.yaml']
# If it is a string, we can assume it is a path and attempt to open it.
- if isinstance(config_file, str):
- if os.path.exists(config_file):
- config_file = open(config_file, 'rb')
+ elif isinstance(config_file, str):
+ paths_to_try = [config_file]
+ # If closed file descriptor, get file path to reopen later.
+ elif getattr(config_file, 'closed', False):
+ paths_to_try = [config_file.name]
+ else:
+ paths_to_try = None
+
+ if paths_to_try:
+ # config_file is not a file descriptor, so open it as a path.
+ for path in paths_to_try:
+ path = os.path.abspath(path)
+ log.debug(f"Loading configuration file: {path}")
+ try:
+ config_file = open(path, 'rb')
+ break
+ except FileNotFoundError:
+ continue
else:
raise exceptions.ConfigurationError(
- f"Config file '{config_file}' does not exist.")
+ f"Config file '{paths_to_try[0]}' does not exist.")
+ else:
+ log.debug(f"Loading configuration file: {config_file}")
+ # Ensure file descriptor is at begining
+ config_file.seek(0)
- # Ensure file descriptor is at begining
- config_file.seek(0)
try:
yield config_file
finally:
| {"golden_diff": "diff --git a/mkdocs/config/base.py b/mkdocs/config/base.py\n--- a/mkdocs/config/base.py\n+++ b/mkdocs/config/base.py\n@@ -156,24 +156,34 @@\n \n # Default to the standard config filename.\n if config_file is None:\n- config_file = os.path.abspath('mkdocs.yml')\n-\n- # If closed file descriptor, get file path to reopen later.\n- if hasattr(config_file, 'closed') and config_file.closed:\n- config_file = config_file.name\n-\n- log.debug(f\"Loading configuration file: {config_file}\")\n-\n+ paths_to_try = ['mkdocs.yml', 'mkdocs.yaml']\n # If it is a string, we can assume it is a path and attempt to open it.\n- if isinstance(config_file, str):\n- if os.path.exists(config_file):\n- config_file = open(config_file, 'rb')\n+ elif isinstance(config_file, str):\n+ paths_to_try = [config_file]\n+ # If closed file descriptor, get file path to reopen later.\n+ elif getattr(config_file, 'closed', False):\n+ paths_to_try = [config_file.name]\n+ else:\n+ paths_to_try = None\n+\n+ if paths_to_try:\n+ # config_file is not a file descriptor, so open it as a path.\n+ for path in paths_to_try:\n+ path = os.path.abspath(path)\n+ log.debug(f\"Loading configuration file: {path}\")\n+ try:\n+ config_file = open(path, 'rb')\n+ break\n+ except FileNotFoundError:\n+ continue\n else:\n raise exceptions.ConfigurationError(\n- f\"Config file '{config_file}' does not exist.\")\n+ f\"Config file '{paths_to_try[0]}' does not exist.\")\n+ else:\n+ log.debug(f\"Loading configuration file: {config_file}\")\n+ # Ensure file descriptor is at begining\n+ config_file.seek(0)\n \n- # Ensure file descriptor is at begining\n- config_file.seek(0)\n try:\n yield config_file\n finally:\n", "issue": "Allow mkdocs.yaml config file (.yaml extension)\nEven though `.yml` is commonly used, `.yaml` is actually preferred:\r\n\r\nhttps://yaml.org/faq.html\r\n\r\n> Is there an official extension for YAML files?\r\n>\r\n> Please use \".yaml\" when possible.\r\n\r\nIt would therefore be great if you would allow `mkdocs.yaml`.\r\n\r\nSomewhat related t #2164\r\n\nAllow mkdocs.yaml when '--config' is not passed\nWe wanted to give our colleagues to possibility to build docs automatically when they put a `mkdocs.yml` in the docs folder of their repository. But it happened often, that they used `.yaml` instead the `.yml`.\r\n\r\nWhen this PR is accepted, it will look first look for `mkdocs.yml` when `--config` is not provided. If this file is not present, it will try to find `mkdocs.yaml`.\r\nWhen both are not present, it behaves like before.\r\nWhen both are present, it behaves like before.\r\nIf only the `.yaml` version it present, this one will be used as the config file.\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom yaml import YAMLError\nfrom collections import UserDict\nfrom contextlib import contextmanager\n\nfrom mkdocs import exceptions\nfrom mkdocs import utils\n\n\nlog = logging.getLogger('mkdocs.config')\n\n\nclass ValidationError(Exception):\n \"\"\"Raised during the validation process of the config on errors.\"\"\"\n\n\nclass Config(UserDict):\n \"\"\"\n MkDocs Configuration dict\n\n This is a fairly simple extension of a standard dictionary. 
It adds methods\n for running validation on the structure and contents.\n \"\"\"\n\n def __init__(self, schema, config_file_path=None):\n \"\"\"\n The schema is a Python dict which maps the config name to a validator.\n \"\"\"\n\n self._schema = schema\n self._schema_keys = set(dict(schema).keys())\n # Ensure config_file_path is a Unicode string\n if config_file_path is not None and not isinstance(config_file_path, str):\n try:\n # Assume config_file_path is encoded with the file system encoding.\n config_file_path = config_file_path.decode(encoding=sys.getfilesystemencoding())\n except UnicodeDecodeError:\n raise ValidationError(\"config_file_path is not a Unicode string.\")\n self.config_file_path = config_file_path\n self.data = {}\n\n self.user_configs = []\n self.set_defaults()\n\n def set_defaults(self):\n \"\"\"\n Set the base config by going through each validator and getting the\n default if it has one.\n \"\"\"\n\n for key, config_option in self._schema:\n self[key] = config_option.default\n\n def _validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n value = self.get(key)\n self[key] = config_option.validate(value)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n for key in (set(self.keys()) - self._schema_keys):\n warnings.append((\n key, f\"Unrecognised configuration name: {key}\"\n ))\n\n return failed, warnings\n\n def _pre_validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n config_option.pre_validation(self, key_name=key)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n return failed, warnings\n\n def _post_validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n config_option.post_validation(self, key_name=key)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n return failed, warnings\n\n def validate(self):\n\n failed, warnings = self._pre_validate()\n\n run_failed, run_warnings = self._validate()\n\n failed.extend(run_failed)\n warnings.extend(run_warnings)\n\n # Only run the post validation steps if there are no failures, warnings\n # are okay.\n if len(failed) == 0:\n post_failed, post_warnings = self._post_validate()\n failed.extend(post_failed)\n warnings.extend(post_warnings)\n\n return failed, warnings\n\n def load_dict(self, patch):\n \"\"\" Load config options from a dictionary. \"\"\"\n\n if not isinstance(patch, dict):\n raise exceptions.ConfigurationError(\n \"The configuration is invalid. The expected type was a key \"\n \"value mapping (a python dict) but we got an object of type: \"\n \"{}\".format(type(patch)))\n\n self.user_configs.append(patch)\n self.data.update(patch)\n\n def load_file(self, config_file):\n \"\"\" Load config options from the open file descriptor of a YAML file. 
\"\"\"\n try:\n return self.load_dict(utils.yaml_load(config_file))\n except YAMLError as e:\n # MkDocs knows and understands ConfigurationErrors\n raise exceptions.ConfigurationError(\n f\"MkDocs encountered an error parsing the configuration file: {e}\"\n )\n\n\n@contextmanager\ndef _open_config_file(config_file):\n \"\"\"\n A context manager which yields an open file descriptor ready to be read.\n\n Accepts a filename as a string, an open or closed file descriptor, or None.\n When None, it defaults to `mkdocs.yml` in the CWD. If a closed file descriptor\n is received, a new file descriptor is opened for the same file.\n\n The file descriptor is automaticaly closed when the context manager block is existed.\n \"\"\"\n\n # Default to the standard config filename.\n if config_file is None:\n config_file = os.path.abspath('mkdocs.yml')\n\n # If closed file descriptor, get file path to reopen later.\n if hasattr(config_file, 'closed') and config_file.closed:\n config_file = config_file.name\n\n log.debug(f\"Loading configuration file: {config_file}\")\n\n # If it is a string, we can assume it is a path and attempt to open it.\n if isinstance(config_file, str):\n if os.path.exists(config_file):\n config_file = open(config_file, 'rb')\n else:\n raise exceptions.ConfigurationError(\n f\"Config file '{config_file}' does not exist.\")\n\n # Ensure file descriptor is at begining\n config_file.seek(0)\n try:\n yield config_file\n finally:\n if hasattr(config_file, 'close'):\n config_file.close()\n\n\ndef load_config(config_file=None, **kwargs):\n \"\"\"\n Load the configuration for a given file object or name\n\n The config_file can either be a file object, string or None. If it is None\n the default `mkdocs.yml` filename will loaded.\n\n Extra kwargs are passed to the configuration to replace any default values\n unless they themselves are None.\n \"\"\"\n options = kwargs.copy()\n\n # Filter None values from the options. This usually happens with optional\n # parameters from Click.\n for key, value in options.copy().items():\n if value is None:\n options.pop(key)\n\n with _open_config_file(config_file) as fd:\n options['config_file_path'] = getattr(fd, 'name', '')\n\n # Initialise the config with the default schema.\n from mkdocs.config.defaults import get_schema\n cfg = Config(schema=get_schema(), config_file_path=options['config_file_path'])\n # load the config file\n cfg.load_file(fd)\n\n # Then load the options to overwrite anything in the config.\n cfg.load_dict(options)\n\n errors, warnings = cfg.validate()\n\n for config_name, warning in warnings:\n log.warning(f\"Config value: '{config_name}'. Warning: {warning}\")\n\n for config_name, error in errors:\n log.error(f\"Config value: '{config_name}'. Error: {error}\")\n\n for key, value in cfg.items():\n log.debug(f\"Config value: '{key}' = {value!r}\")\n\n if len(errors) > 0:\n raise exceptions.Abort(\n \"Aborted with {} Configuration Errors!\".format(len(errors))\n )\n elif cfg['strict'] and len(warnings) > 0:\n raise exceptions.Abort(\n \"Aborted with {} Configuration Warnings in 'strict' mode!\".format(len(warnings))\n )\n\n return cfg\n", "path": "mkdocs/config/base.py"}]} | 2,959 | 468 |
gh_patches_debug_37261 | rasdani/github-patches | git_diff | cupy__cupy-781 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cupy.linalg.norm returns complex scalar for complex input
```python
>>> a = cupy.array([1j, 2, 3])
>>> a.dtype
dtype('complex128')
>>> cupy.linalg.norm(a).dtype
dtype('complex128')
```
It should be `float64` in this case.
</issue>
<code>
[start of cupy/linalg/norms.py]
1 import numpy
2 from numpy import linalg
3
4 import cupy
5 from cupy import cuda
6 from cupy.cuda import device
7 from cupy.linalg import decomposition
8 from cupy.linalg import util
9
10
11 if cuda.cusolver_enabled:
12 from cupy.cuda import cusolver
13
14
15 def norm(x, ord=None, axis=None, keepdims=False):
16 """Returns one of matrix norms specified by ``ord`` parameter.
17
18 Complex valued matrices and vectors are not supported.
19 See numpy.linalg.norm for more detail.
20
21 Args:
22 x (cupy.ndarray): Array to take norm. If ``axis`` is None,
23 ``x`` must be 1-D or 2-D.
24 ord (non-zero int, inf, -inf, 'fro'): Norm type.
25 axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over
26 ``axis``.
27 keepdims (bool): If this is set ``True``, the axes which are normed
28 over are left.
29
30 Returns:
31 cupy.ndarray
32
33 """
34 if not issubclass(x.dtype.type, numpy.inexact):
35 x = x.astype(float)
36
37 # Immediately handle some default, simple, fast, and common cases.
38 if axis is None:
39 ndim = x.ndim
40 if (ord is None or (ndim == 1 and ord == 2) or
41 (ndim == 2 and ord in ('f', 'fro'))):
42 ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))
43 if keepdims:
44 ret = ret.reshape((1,) * ndim)
45 return ret
46
47 # Normalize the `axis` argument to a tuple.
48 nd = x.ndim
49 if axis is None:
50 axis = tuple(range(nd))
51 elif not isinstance(axis, tuple):
52 try:
53 axis = int(axis)
54 except Exception:
55 raise TypeError(
56 "'axis' must be None, an integer or a tuple of integers")
57 axis = (axis,)
58
59 if len(axis) == 1:
60 if ord == numpy.Inf:
61 return abs(x).max(axis=axis, keepdims=keepdims)
62 elif ord == -numpy.Inf:
63 return abs(x).min(axis=axis, keepdims=keepdims)
64 elif ord == 0:
65 # Zero norm
66 # Convert to Python float in accordance with NumPy
67 return (x != 0).sum(axis=axis, keepdims=keepdims, dtype='d')
68 elif ord == 1:
69 # special case for speedup
70 return abs(x).sum(axis=axis, keepdims=keepdims)
71 elif ord is None or ord == 2:
72 # special case for speedup
73 s = x ** 2
74 return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
75 else:
76 try:
77 float(ord)
78 except TypeError:
79 raise ValueError("Invalid norm order for vectors.")
80 absx = abs(x).astype('d')
81 absx **= ord
82 ret = absx.sum(axis=axis, keepdims=keepdims)
83 ret **= (1.0 / ord)
84 return ret
85 elif len(axis) == 2:
86 row_axis, col_axis = axis
87 if row_axis < 0:
88 row_axis += nd
89 if col_axis < 0:
90 col_axis += nd
91 if not (0 <= row_axis < nd and 0 <= col_axis < nd):
92 raise ValueError('Invalid axis %r for an array with shape %r' %
93 (axis, x.shape))
94 if row_axis == col_axis:
95 raise ValueError('Duplicate axes given.')
96 if ord == 1:
97 if col_axis > row_axis:
98 col_axis -= 1
99 ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
100 elif ord == numpy.Inf:
101 if row_axis > col_axis:
102 row_axis -= 1
103 ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
104 elif ord == -1:
105 if col_axis > row_axis:
106 col_axis -= 1
107 ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
108 elif ord == -numpy.Inf:
109 if row_axis > col_axis:
110 row_axis -= 1
111 ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
112 elif ord in [None, 'fro', 'f']:
113 ret = cupy.sqrt((x ** 2).sum(axis=axis))
114 else:
115 raise ValueError("Invalid norm order for matrices.")
116 if keepdims:
117 ret_shape = list(x.shape)
118 ret_shape[axis[0]] = 1
119 ret_shape[axis[1]] = 1
120 ret = ret.reshape(ret_shape)
121 return ret
122 else:
123 raise ValueError("Improper number of dimensions to norm.")
124
125
126 # TODO(okuta): Implement cond
127
128
129 def det(a):
130 """Retruns the deteminant of an array.
131
132 Args:
133 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
134
135 Returns:
136 cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.
137
138 .. seealso:: :func:`numpy.linalg.det`
139 """
140 sign, logdet = slogdet(a)
141 return sign * cupy.exp(logdet)
142
143
144 def matrix_rank(M, tol=None):
145 """Return matrix rank of array using SVD method
146
147 Args:
148 M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to
149 2.
150 tol (None or float): Threshold of singular value of `M`.
151 When `tol` is `None`, and `eps` is the epsilon value for datatype
152 of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,
153 where `S` is the singular value of `M`.
154 It obeys :func:`numpy.linalg.matrix_rank`.
155
156 Returns:
157 cupy.ndarray: Rank of `M`.
158
159 .. seealso:: :func:`numpy.linalg.matrix_rank`
160 """
161 if M.ndim < 2:
162 return (M != 0).any().astype('l')
163 S = decomposition.svd(M, compute_uv=False)
164 if tol is None:
165 tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *
166 numpy.finfo(S.dtype).eps)
167 return (S > tol).sum(axis=-1)
168
169
170 def slogdet(a):
171 """Returns sign and logarithm of the determinat of an array.
172
173 It calculates the natural logarithm of the deteminant of a given value.
174
175 Args:
176 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
177
178 Returns:
179 tuple of :class:`~cupy.ndarray`:
180 It returns a tuple ``(sign, logdet)``. ``sign`` represents each
181 sign of the deteminant as a real number ``0``, ``1`` or ``-1``.
182 'logdet' represents the natural logarithm of the absolute of the
183 deteminant.
184 If the deteninant is zero, ``sign`` will be ``0`` and ``logdet``
185 will be ``-inf``.
186 The shapes of both ``sign`` and ``logdet`` are equal to
187 ``a.shape[:-2]``.
188
189 .. seealso:: :func:`numpy.linalg.slogdet`
190 """
191 if not cuda.cusolver_enabled:
192 raise RuntimeError('Current cupy only supports cusolver in CUDA 8.0')
193
194 if a.ndim < 2:
195 msg = ('%d-dimensional array given. '
196 'Array must be at least two-dimensional' % a.ndim)
197 raise linalg.LinAlgError(msg)
198
199 dtype = numpy.find_common_type((a.dtype.char, 'f'), ())
200 shape = a.shape[:-2]
201 sign = cupy.empty(shape, dtype)
202 logdet = cupy.empty(shape, dtype)
203
204 a = a.astype(dtype)
205 for index in numpy.ndindex(*shape):
206 s, l = _slogdet_one(a[index])
207 sign[index] = s
208 logdet[index] = l
209 return sign, logdet
210
211
212 def _slogdet_one(a):
213 util._assert_rank2(a)
214 util._assert_nd_squareness(a)
215 dtype = a.dtype
216
217 handle = device.get_cusolver_handle()
218 m = len(a)
219 ipiv = cupy.empty(m, 'i')
220 info = cupy.empty((), 'i')
221
222 # Need to make a copy because getrf works inplace
223 a_copy = a.copy(order='F')
224
225 if dtype == 'f':
226 getrf_bufferSize = cusolver.sgetrf_bufferSize
227 getrf = cusolver.sgetrf
228 else:
229 getrf_bufferSize = cusolver.dgetrf_bufferSize
230 getrf = cusolver.dgetrf
231
232 buffersize = getrf_bufferSize(handle, m, m, a_copy.data.ptr, m)
233 workspace = cupy.empty(buffersize, dtype=dtype)
234 getrf(handle, m, m, a_copy.data.ptr, m, workspace.data.ptr,
235 ipiv.data.ptr, info.data.ptr)
236
237 if info[()] == 0:
238 diag = cupy.diag(a_copy)
239 # ipiv is 1-origin
240 non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, m + 1)) +
241 cupy.count_nonzero(diag < 0))
242 # Note: sign == -1 ** (non_zero % 2)
243 sign = (non_zero % 2) * -2 + 1
244 logdet = cupy.log(abs(diag)).sum()
245 else:
246 sign = cupy.array(0.0, dtype=dtype)
247 logdet = cupy.array(float('-inf'), dtype)
248
249 return sign, logdet
250
251
252 def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
253 """Returns the sum along the diagonals of an array.
254
255 It computes the sum along the diagonals at ``axis1`` and ``axis2``.
256
257 Args:
258 a (cupy.ndarray): Array to take trace.
259 offset (int): Index of diagonals. Zero indicates the main diagonal, a
260 positive value an upper diagonal, and a negative value a lower
261 diagonal.
262 axis1 (int): The first axis along which the trace is taken.
263 axis2 (int): The second axis along which the trace is taken.
264 dtype: Data type specifier of the output.
265 out (cupy.ndarray): Output array.
266
267 Returns:
268 cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.
269
270 .. seealso:: :func:`numpy.trace`
271
272 """
273 # TODO(okuta): check type
274 return a.trace(offset, axis1, axis2, dtype, out)
275
[end of cupy/linalg/norms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py
--- a/cupy/linalg/norms.py
+++ b/cupy/linalg/norms.py
@@ -39,7 +39,12 @@
ndim = x.ndim
if (ord is None or (ndim == 1 and ord == 2) or
(ndim == 2 and ord in ('f', 'fro'))):
- ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ s = abs(x.ravel())
+ s *= s
+ ret = cupy.sqrt(s.sum())
+ else:
+ ret = cupy.sqrt((x.ravel() ** 2).sum())
if keepdims:
ret = ret.reshape((1,) * ndim)
return ret
@@ -70,14 +75,27 @@
return abs(x).sum(axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
- s = x ** 2
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ s = abs(x)
+ s *= s
+ else:
+ s = x ** 2
return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
else:
try:
float(ord)
except TypeError:
raise ValueError("Invalid norm order for vectors.")
- absx = abs(x).astype('d')
+
+ # Mirror Numpy behavior of casting to double for non-complex
+ # dtypes, and to float32 or float64 for complex dtypes and
+ # no reduction over all axes.
+ cast_dtype = 'd'
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ if keepdims or tuple(sorted(axis)) != tuple(range(nd)):
+ cast_dtype = x.dtype.char.lower() # 'D'->'d' and 'F'->'f'
+
+ absx = abs(x).astype(cast_dtype)
absx **= ord
ret = absx.sum(axis=axis, keepdims=keepdims)
ret **= (1.0 / ord)
@@ -110,7 +128,12 @@
row_axis -= 1
ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
- ret = cupy.sqrt((x ** 2).sum(axis=axis))
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ s = abs(x)
+ s *= s
+ ret = cupy.sqrt(s.sum(axis=axis))
+ else:
+ ret = cupy.sqrt((x ** 2).sum(axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
| {"golden_diff": "diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py\n--- a/cupy/linalg/norms.py\n+++ b/cupy/linalg/norms.py\n@@ -39,7 +39,12 @@\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n- ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ s = abs(x.ravel())\n+ s *= s\n+ ret = cupy.sqrt(s.sum())\n+ else:\n+ ret = cupy.sqrt((x.ravel() ** 2).sum())\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n@@ -70,14 +75,27 @@\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n- s = x ** 2\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ s = abs(x)\n+ s *= s\n+ else:\n+ s = x ** 2\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError(\"Invalid norm order for vectors.\")\n- absx = abs(x).astype('d')\n+\n+ # Mirror Numpy behavior of casting to double for non-complex\n+ # dtypes, and to float32 or float64 for complex dtypes and\n+ # no reduction over all axes.\n+ cast_dtype = 'd'\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ if keepdims or tuple(sorted(axis)) != tuple(range(nd)):\n+ cast_dtype = x.dtype.char.lower() # 'D'->'d' and 'F'->'f'\n+\n+ absx = abs(x).astype(cast_dtype)\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= (1.0 / ord)\n@@ -110,7 +128,12 @@\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n- ret = cupy.sqrt((x ** 2).sum(axis=axis))\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ s = abs(x)\n+ s *= s\n+ ret = cupy.sqrt(s.sum(axis=axis))\n+ else:\n+ ret = cupy.sqrt((x ** 2).sum(axis=axis))\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n", "issue": "cupy.linalg.norm returns complex scalar for complex input\n```python\r\n>>> a = cupy.array([1j, 2, 3])\r\n>>> a.dtype\r\ndtype('complex128')\r\n>>> cupy.linalg.norm(a).dtype\r\ndtype('complex128')\r\n```\r\nIt should be `float64` in this case.\n", "before_files": [{"content": "import numpy\nfrom numpy import linalg\n\nimport cupy\nfrom cupy import cuda\nfrom cupy.cuda import device\nfrom cupy.linalg import decomposition\nfrom cupy.linalg import util\n\n\nif cuda.cusolver_enabled:\n from cupy.cuda import cusolver\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n\n Complex valued matrices and vectors are not supported.\n See numpy.linalg.norm for more detail.\n\n Args:\n x (cupy.ndarray): Array to take norm. 
If ``axis`` is None,\n ``x`` must be 1-D or 2-D.\n ord (non-zero int, inf, -inf, 'fro'): Norm type.\n axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over\n ``axis``.\n keepdims (bool): If this is set ``True``, the axes which are normed\n over are left.\n\n Returns:\n cupy.ndarray\n\n \"\"\"\n if not issubclass(x.dtype.type, numpy.inexact):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception:\n raise TypeError(\n \"'axis' must be None, an integer or a tuple of integers\")\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == numpy.Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -numpy.Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n # Convert to Python float in accordance with NumPy\n return (x != 0).sum(axis=axis, keepdims=keepdims, dtype='d')\n elif ord == 1:\n # special case for speedup\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n s = x ** 2\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError(\"Invalid norm order for vectors.\")\n absx = abs(x).astype('d')\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= (1.0 / ord)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if not (0 <= row_axis < nd and 0 <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n elif ord == numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).min(axis=col_axis)\n elif ord == -numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n ret = cupy.sqrt((x ** 2).sum(axis=axis))\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n\n# TODO(okuta): Implement cond\n\n\ndef det(a):\n \"\"\"Retruns the deteminant of an array.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.\n\n .. seealso:: :func:`numpy.linalg.det`\n \"\"\"\n sign, logdet = slogdet(a)\n return sign * cupy.exp(logdet)\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"Return matrix rank of array using SVD method\n\n Args:\n M (cupy.ndarray): Input array. 
Its `ndim` must be less than or equal to\n 2.\n tol (None or float): Threshold of singular value of `M`.\n When `tol` is `None`, and `eps` is the epsilon value for datatype\n of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,\n where `S` is the singular value of `M`.\n It obeys :func:`numpy.linalg.matrix_rank`.\n\n Returns:\n cupy.ndarray: Rank of `M`.\n\n .. seealso:: :func:`numpy.linalg.matrix_rank`\n \"\"\"\n if M.ndim < 2:\n return (M != 0).any().astype('l')\n S = decomposition.svd(M, compute_uv=False)\n if tol is None:\n tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *\n numpy.finfo(S.dtype).eps)\n return (S > tol).sum(axis=-1)\n\n\ndef slogdet(a):\n \"\"\"Returns sign and logarithm of the determinat of an array.\n\n It calculates the natural logarithm of the deteminant of a given value.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n tuple of :class:`~cupy.ndarray`:\n It returns a tuple ``(sign, logdet)``. ``sign`` represents each\n sign of the deteminant as a real number ``0``, ``1`` or ``-1``.\n 'logdet' represents the natural logarithm of the absolute of the\n deteminant.\n If the deteninant is zero, ``sign`` will be ``0`` and ``logdet``\n will be ``-inf``.\n The shapes of both ``sign`` and ``logdet`` are equal to\n ``a.shape[:-2]``.\n\n .. seealso:: :func:`numpy.linalg.slogdet`\n \"\"\"\n if not cuda.cusolver_enabled:\n raise RuntimeError('Current cupy only supports cusolver in CUDA 8.0')\n\n if a.ndim < 2:\n msg = ('%d-dimensional array given. '\n 'Array must be at least two-dimensional' % a.ndim)\n raise linalg.LinAlgError(msg)\n\n dtype = numpy.find_common_type((a.dtype.char, 'f'), ())\n shape = a.shape[:-2]\n sign = cupy.empty(shape, dtype)\n logdet = cupy.empty(shape, dtype)\n\n a = a.astype(dtype)\n for index in numpy.ndindex(*shape):\n s, l = _slogdet_one(a[index])\n sign[index] = s\n logdet[index] = l\n return sign, logdet\n\n\ndef _slogdet_one(a):\n util._assert_rank2(a)\n util._assert_nd_squareness(a)\n dtype = a.dtype\n\n handle = device.get_cusolver_handle()\n m = len(a)\n ipiv = cupy.empty(m, 'i')\n info = cupy.empty((), 'i')\n\n # Need to make a copy because getrf works inplace\n a_copy = a.copy(order='F')\n\n if dtype == 'f':\n getrf_bufferSize = cusolver.sgetrf_bufferSize\n getrf = cusolver.sgetrf\n else:\n getrf_bufferSize = cusolver.dgetrf_bufferSize\n getrf = cusolver.dgetrf\n\n buffersize = getrf_bufferSize(handle, m, m, a_copy.data.ptr, m)\n workspace = cupy.empty(buffersize, dtype=dtype)\n getrf(handle, m, m, a_copy.data.ptr, m, workspace.data.ptr,\n ipiv.data.ptr, info.data.ptr)\n\n if info[()] == 0:\n diag = cupy.diag(a_copy)\n # ipiv is 1-origin\n non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, m + 1)) +\n cupy.count_nonzero(diag < 0))\n # Note: sign == -1 ** (non_zero % 2)\n sign = (non_zero % 2) * -2 + 1\n logdet = cupy.log(abs(diag)).sum()\n else:\n sign = cupy.array(0.0, dtype=dtype)\n logdet = cupy.array(float('-inf'), dtype)\n\n return sign, logdet\n\n\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n \"\"\"Returns the sum along the diagonals of an array.\n\n It computes the sum along the diagonals at ``axis1`` and ``axis2``.\n\n Args:\n a (cupy.ndarray): Array to take trace.\n offset (int): Index of diagonals. 
Zero indicates the main diagonal, a\n positive value an upper diagonal, and a negative value a lower\n diagonal.\n axis1 (int): The first axis along which the trace is taken.\n axis2 (int): The second axis along which the trace is taken.\n dtype: Data type specifier of the output.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.\n\n .. seealso:: :func:`numpy.trace`\n\n \"\"\"\n # TODO(okuta): check type\n return a.trace(offset, axis1, axis2, dtype, out)\n", "path": "cupy/linalg/norms.py"}]} | 3,739 | 665 |
gh_patches_debug_28952 | rasdani/github-patches | git_diff | nautobot__nautobot-3943 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Protect against Git-repo jobs clobbering sys.modules
### As ...
Patti - Platform Admin
### I want ...
To add Jobs via Git repositories without worrying about them impacting the overall stability of the platform.
After #3840, there is a risk that a misnamed (or maliciously-named) Git repository could clobber existing Python code. For example, creating a repository and assigning it the slug `nautobot` causes all sorts of havoc: Nautobot unloads itself and then attempts to reimport all of Nautobot's code from the Git repository by that name.
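A minimal sketch of the kind of guard this implies, checking a proposed slug against importable module names before accepting it (the `importlib.util.find_spec` check mirrors the patch later in this entry; packaging it as a standalone validator is an assumption for illustration):

```python
# Hypothetical standalone validator; in the real fix the check lives in GitRepository.clean().
# find_spec() consults the import machinery without importing the module itself
# (assuming a simple, dot-free slug such as "nautobot").
from importlib.util import find_spec

from django.core.exceptions import ValidationError


def validate_repository_slug(slug: str) -> None:
    """Reject slugs that shadow an installed Python package or module."""
    if find_spec(slug) is not None:
        raise ValidationError(
            f'Please choose a different slug, as "{slug}" is an installed Python package or module.'
        )
```

The accepted patch performs essentially this check inside `GitRepository.clean()`, only for newly created repositories; its comment notes that `GIT_ROOT` is added to the end of `sys.path`, so a colliding slug would resolve to the earlier-found module rather than the repository.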
### So that ...
The application is resilient to user error as well as mischievous/malicious actions by authenticated users.
### I know this is done when...
- Creating a GitRepository whose `slug` matches any currently loaded Python module in the Nautobot environment is rejected with an appropriate error message.
### Optional - Feature groups this request pertains to.
- [X] Automation
- [ ] Circuits
- [ ] DCIM
- [ ] IPAM
- [ ] Misc (including Data Sources)
- [ ] Organization
- [ ] Plugins (and other Extensibility)
- [X] Security (Secrets, etc)
- [ ] Image Management
- [ ] UI/UX
- [ ] Documentation
- [ ] Other (not directly a platform feature)
### Database Changes
None
### External Dependencies
None
</issue>
<code>
[start of nautobot/extras/models/datasources.py]
1 """Models for representing external data sources."""
2 import os
3
4 from django.conf import settings
5 from django.core.exceptions import ValidationError
6 from django.core.serializers.json import DjangoJSONEncoder
7 from django.core.validators import URLValidator
8 from django.db import models
9
10 from nautobot.core.models.fields import AutoSlugField, slugify_dashes_to_underscores
11 from nautobot.core.models.generics import PrimaryModel
12 from nautobot.extras.utils import extras_features, check_if_key_is_graphql_safe
13
14
15 @extras_features(
16 "config_context_owners",
17 "export_template_owners",
18 "job_results",
19 "webhooks",
20 )
21 class GitRepository(PrimaryModel):
22 """Representation of a Git repository used as an external data source."""
23
24 name = models.CharField(
25 max_length=100,
26 unique=True,
27 )
28 slug = AutoSlugField(
29 populate_from="name",
30 help_text="Internal field name. Please use underscores rather than dashes in this key.",
31 slugify_function=slugify_dashes_to_underscores,
32 )
33
34 remote_url = models.URLField(
35 max_length=255,
36 # For the moment we don't support ssh:// and git:// URLs
37 help_text="Only HTTP and HTTPS URLs are presently supported",
38 validators=[URLValidator(schemes=["http", "https"])],
39 )
40 branch = models.CharField(
41 max_length=64,
42 default="main",
43 )
44
45 current_head = models.CharField(
46 help_text="Commit hash of the most recent fetch from the selected branch. Used for syncing between workers.",
47 max_length=48,
48 default="",
49 blank=True,
50 )
51
52 secrets_group = models.ForeignKey(
53 to="extras.SecretsGroup",
54 on_delete=models.SET_NULL,
55 default=None,
56 blank=True,
57 null=True,
58 related_name="git_repositories",
59 )
60
61 # Data content types that this repo is a source of. Valid options are dynamically generated based on
62 # the data types registered in registry['datasource_contents'].
63 provided_contents = models.JSONField(encoder=DjangoJSONEncoder, default=list, blank=True)
64
65 clone_fields = ["remote_url", "secrets_group", "provided_contents"]
66
67 class Meta:
68 ordering = ["name"]
69 verbose_name = "Git repository"
70 verbose_name_plural = "Git repositories"
71
72 def __init__(self, *args, **kwargs):
73 super().__init__(*args, **kwargs)
74
75 # Store the initial repo slug so we can check for changes on save().
76 self.__initial_slug = self.slug
77
78 def __str__(self):
79 return self.name
80
81 def clean(self):
82 super().clean()
83
84 if self.slug != "":
85 check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, "slug")
86
87 if self.present_in_database and self.slug != self.__initial_slug:
88 raise ValidationError(
89 f"Slug cannot be changed once set. Current slug is {self.__initial_slug}, "
90 f"requested slug is {self.slug}"
91 )
92
93 def get_latest_sync(self):
94 """
95 Return a `JobResult` for the latest sync operation.
96
97 Returns:
98 JobResult
99 """
100 from nautobot.extras.models import JobResult
101
102 # This will match all "GitRepository" jobs (pull/refresh, dry-run, etc.)
103 prefix = "nautobot.core.jobs.GitRepository"
104 return JobResult.objects.filter(task_name__startswith=prefix, task_kwargs__repository=self.pk).latest()
105
106 def to_csv(self):
107 return (
108 self.name,
109 self.slug,
110 self.remote_url,
111 self.branch,
112 self.secrets_group.name if self.secrets_group else None,
113 self.provided_contents,
114 )
115
116 @property
117 def filesystem_path(self):
118 return os.path.join(settings.GIT_ROOT, self.slug)
119
120 def sync(self, user, dry_run=False):
121 """
122 Enqueue a Job to pull the Git repository from the remote and return the sync result.
123
124 Args:
125 user (User): The User that will perform the sync.
126 dry_run (bool): If set, dry-run the Git sync.
127
128 Returns:
129 JobResult
130 """
131 from nautobot.extras.datasources import (
132 enqueue_pull_git_repository_and_refresh_data,
133 enqueue_git_repository_diff_origin_and_local,
134 )
135
136 if dry_run:
137 return enqueue_git_repository_diff_origin_and_local(self, user)
138 return enqueue_pull_git_repository_and_refresh_data(self, user)
139
[end of nautobot/extras/models/datasources.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/extras/models/datasources.py b/nautobot/extras/models/datasources.py
--- a/nautobot/extras/models/datasources.py
+++ b/nautobot/extras/models/datasources.py
@@ -1,4 +1,5 @@
"""Models for representing external data sources."""
+from importlib.util import find_spec
import os
from django.conf import settings
@@ -81,13 +82,24 @@
def clean(self):
super().clean()
- if self.slug != "":
- check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, "slug")
+ # Autogenerate slug now, rather than in pre_save(), if not set already, as we need to check it below.
+ if self.slug == "":
+ self._meta.get_field("slug").create_slug(self, add=(not self.present_in_database))
+
+ if self.present_in_database and self.slug != self.__initial_slug:
+ raise ValidationError(
+ f"Slug cannot be changed once set. Current slug is {self.__initial_slug}, "
+ f"requested slug is {self.slug}"
+ )
- if self.present_in_database and self.slug != self.__initial_slug:
+ if not self.present_in_database:
+ check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, "slug")
+ # Check on create whether the proposed slug conflicts with a module name already in the Python environment.
+ # Because we add GIT_ROOT to the end of sys.path, trying to import this repository will instead
+ # import the earlier-found Python module in its place, which would be undesirable.
+ if find_spec(self.slug) is not None:
raise ValidationError(
- f"Slug cannot be changed once set. Current slug is {self.__initial_slug}, "
- f"requested slug is {self.slug}"
+ f'Please choose a different slug, as "{self.slug}" is an installed Python package or module.'
)
def get_latest_sync(self):
| {"golden_diff": "diff --git a/nautobot/extras/models/datasources.py b/nautobot/extras/models/datasources.py\n--- a/nautobot/extras/models/datasources.py\n+++ b/nautobot/extras/models/datasources.py\n@@ -1,4 +1,5 @@\n \"\"\"Models for representing external data sources.\"\"\"\n+from importlib.util import find_spec\n import os\n \n from django.conf import settings\n@@ -81,13 +82,24 @@\n def clean(self):\n super().clean()\n \n- if self.slug != \"\":\n- check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, \"slug\")\n+ # Autogenerate slug now, rather than in pre_save(), if not set already, as we need to check it below.\n+ if self.slug == \"\":\n+ self._meta.get_field(\"slug\").create_slug(self, add=(not self.present_in_database))\n+\n+ if self.present_in_database and self.slug != self.__initial_slug:\n+ raise ValidationError(\n+ f\"Slug cannot be changed once set. Current slug is {self.__initial_slug}, \"\n+ f\"requested slug is {self.slug}\"\n+ )\n \n- if self.present_in_database and self.slug != self.__initial_slug:\n+ if not self.present_in_database:\n+ check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, \"slug\")\n+ # Check on create whether the proposed slug conflicts with a module name already in the Python environment.\n+ # Because we add GIT_ROOT to the end of sys.path, trying to import this repository will instead\n+ # import the earlier-found Python module in its place, which would be undesirable.\n+ if find_spec(self.slug) is not None:\n raise ValidationError(\n- f\"Slug cannot be changed once set. Current slug is {self.__initial_slug}, \"\n- f\"requested slug is {self.slug}\"\n+ f'Please choose a different slug, as \"{self.slug}\" is an installed Python package or module.'\n )\n \n def get_latest_sync(self):\n", "issue": "Protect against Git-repo jobs clobbering sys.modules\n### As ...\n\nPatti - Platform Admin\n\n### I want ...\n\nTo add Jobs via Git repositories without worrying about them impacting the overall stability of the platform.\r\n\r\nAfter #3840 , there is a risk that a misnamed (or maliciously-named) Git repository could potentially clobber existing Python code. 
For example, creating a repository and assigning it the slug `nautobot` causes all sorts of havoc as it results in Nautobot unloading itself and then attempting to reimport all of Nautobot's code from the Git repository by that name.\n\n### So that ...\n\nThe application is resilient to user error as well as mischievous/malicious actions by authenticated users.\n\n### I know this is done when...\n\n- Creating a GitRepository whose `slug` matches any currently loaded Python module in the Nautobot environment is rejected with an appropriate error message.\n\n### Optional - Feature groups this request pertains to.\n\n- [X] Automation\n- [ ] Circuits\n- [ ] DCIM\n- [ ] IPAM\n- [ ] Misc (including Data Sources)\n- [ ] Organization\n- [ ] Plugins (and other Extensibility)\n- [X] Security (Secrets, etc)\n- [ ] Image Management\n- [ ] UI/UX\n- [ ] Documentation\n- [ ] Other (not directly a platform feature)\n\n### Database Changes\n\nNone\n\n### External Dependencies\n\nNone\n", "before_files": [{"content": "\"\"\"Models for representing external data sources.\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.validators import URLValidator\nfrom django.db import models\n\nfrom nautobot.core.models.fields import AutoSlugField, slugify_dashes_to_underscores\nfrom nautobot.core.models.generics import PrimaryModel\nfrom nautobot.extras.utils import extras_features, check_if_key_is_graphql_safe\n\n\n@extras_features(\n \"config_context_owners\",\n \"export_template_owners\",\n \"job_results\",\n \"webhooks\",\n)\nclass GitRepository(PrimaryModel):\n \"\"\"Representation of a Git repository used as an external data source.\"\"\"\n\n name = models.CharField(\n max_length=100,\n unique=True,\n )\n slug = AutoSlugField(\n populate_from=\"name\",\n help_text=\"Internal field name. Please use underscores rather than dashes in this key.\",\n slugify_function=slugify_dashes_to_underscores,\n )\n\n remote_url = models.URLField(\n max_length=255,\n # For the moment we don't support ssh:// and git:// URLs\n help_text=\"Only HTTP and HTTPS URLs are presently supported\",\n validators=[URLValidator(schemes=[\"http\", \"https\"])],\n )\n branch = models.CharField(\n max_length=64,\n default=\"main\",\n )\n\n current_head = models.CharField(\n help_text=\"Commit hash of the most recent fetch from the selected branch. Used for syncing between workers.\",\n max_length=48,\n default=\"\",\n blank=True,\n )\n\n secrets_group = models.ForeignKey(\n to=\"extras.SecretsGroup\",\n on_delete=models.SET_NULL,\n default=None,\n blank=True,\n null=True,\n related_name=\"git_repositories\",\n )\n\n # Data content types that this repo is a source of. 
Valid options are dynamically generated based on\n # the data types registered in registry['datasource_contents'].\n provided_contents = models.JSONField(encoder=DjangoJSONEncoder, default=list, blank=True)\n\n clone_fields = [\"remote_url\", \"secrets_group\", \"provided_contents\"]\n\n class Meta:\n ordering = [\"name\"]\n verbose_name = \"Git repository\"\n verbose_name_plural = \"Git repositories\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Store the initial repo slug so we can check for changes on save().\n self.__initial_slug = self.slug\n\n def __str__(self):\n return self.name\n\n def clean(self):\n super().clean()\n\n if self.slug != \"\":\n check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, \"slug\")\n\n if self.present_in_database and self.slug != self.__initial_slug:\n raise ValidationError(\n f\"Slug cannot be changed once set. Current slug is {self.__initial_slug}, \"\n f\"requested slug is {self.slug}\"\n )\n\n def get_latest_sync(self):\n \"\"\"\n Return a `JobResult` for the latest sync operation.\n\n Returns:\n JobResult\n \"\"\"\n from nautobot.extras.models import JobResult\n\n # This will match all \"GitRepository\" jobs (pull/refresh, dry-run, etc.)\n prefix = \"nautobot.core.jobs.GitRepository\"\n return JobResult.objects.filter(task_name__startswith=prefix, task_kwargs__repository=self.pk).latest()\n\n def to_csv(self):\n return (\n self.name,\n self.slug,\n self.remote_url,\n self.branch,\n self.secrets_group.name if self.secrets_group else None,\n self.provided_contents,\n )\n\n @property\n def filesystem_path(self):\n return os.path.join(settings.GIT_ROOT, self.slug)\n\n def sync(self, user, dry_run=False):\n \"\"\"\n Enqueue a Job to pull the Git repository from the remote and return the sync result.\n\n Args:\n user (User): The User that will perform the sync.\n dry_run (bool): If set, dry-run the Git sync.\n\n Returns:\n JobResult\n \"\"\"\n from nautobot.extras.datasources import (\n enqueue_pull_git_repository_and_refresh_data,\n enqueue_git_repository_diff_origin_and_local,\n )\n\n if dry_run:\n return enqueue_git_repository_diff_origin_and_local(self, user)\n return enqueue_pull_git_repository_and_refresh_data(self, user)\n", "path": "nautobot/extras/models/datasources.py"}]} | 2,119 | 445 |
gh_patches_debug_160 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop distutils support
I feel it's about time to drop **distutils** support, since we rely on [extras_require of setuptools](https://pythonhosted.org/setuptools/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) to handle **yaml** dependencies.
What's your opinion on this matter?
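For context, the change being asked for is small: the `try`/`except ImportError` fallback to `distutils.core.setup` disappears and setuptools becomes a hard requirement, since `extras_require` with environment markers (as already used for the YAML dependencies) is a setuptools-only feature. A sketch of the resulting `setup.py`, abridged to the relevant parts (it mirrors the patch at the end of this entry):

```python
# setup.py (abridged sketch): setuptools is required outright.
from setuptools import setup

setup(
    name='cookiecutter',
    # ... other arguments unchanged ...
    extras_require={
        ':sys_platform=="win32" and python_version=="2.7"': ['PyYAML>=3.10'],
        ':sys_platform!="win32" or python_version!="2.7"': ['ruamel.yaml>=0.10.12'],
    },
)
```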
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 import sys
5
6 try:
7 from setuptools import setup
8 except ImportError:
9 from distutils.core import setup
10
11 version = "1.3.0"
12
13 if sys.argv[-1] == 'publish':
14 os.system('python setup.py sdist upload')
15 os.system('python setup.py bdist_wheel upload')
16 sys.exit()
17
18 if sys.argv[-1] == 'tag':
19 os.system("git tag -a %s -m 'version %s'" % (version, version))
20 os.system("git push --tags")
21 sys.exit()
22
23 with open('README.rst') as readme_file:
24 readme = readme_file.read()
25
26 with open('HISTORY.rst') as history_file:
27 history = history_file.read().replace('.. :changelog:', '')
28
29 requirements = [
30 'future>=0.15.2',
31 'binaryornot>=0.2.0',
32 'jinja2>=2.7',
33 'click>=5.0',
34 'whichcraft>=0.1.1'
35 ]
36
37 long_description = readme + '\n\n' + history
38
39 if sys.argv[-1] == 'readme':
40 print(long_description)
41 sys.exit()
42
43
44 setup(
45 name='cookiecutter',
46 version=version,
47 description=('A command-line utility that creates projects from project '
48 'templates, e.g. creating a Python package project from a '
49 'Python package project template.'),
50 long_description=long_description,
51 author='Audrey Roy',
52 author_email='[email protected]',
53 url='https://github.com/audreyr/cookiecutter',
54 packages=[
55 'cookiecutter',
56 ],
57 package_dir={'cookiecutter': 'cookiecutter'},
58 entry_points={
59 'console_scripts': [
60 'cookiecutter = cookiecutter.cli:main',
61 ]
62 },
63 include_package_data=True,
64 install_requires=requirements,
65 extras_require={
66 ':sys_platform=="win32" and python_version=="2.7"': [
67 'PyYAML>=3.10'
68 ],
69 ':sys_platform!="win32" or python_version!="2.7"': [
70 'ruamel.yaml>=0.10.12'
71 ]
72 },
73 license='BSD',
74 zip_safe=False,
75 classifiers=[
76 'Development Status :: 5 - Production/Stable',
77 'Environment :: Console',
78 'Intended Audience :: Developers',
79 'Natural Language :: English',
80 'License :: OSI Approved :: BSD License',
81 'Programming Language :: Python',
82 'Programming Language :: Python :: 2',
83 'Programming Language :: Python :: 2.7',
84 'Programming Language :: Python :: 3',
85 'Programming Language :: Python :: 3.3',
86 'Programming Language :: Python :: 3.4',
87 'Programming Language :: Python :: 3.5',
88 'Programming Language :: Python :: Implementation :: CPython',
89 'Programming Language :: Python :: Implementation :: PyPy',
90 'Topic :: Software Development',
91 ],
92 keywords=(
93 'cookiecutter, Python, projects, project templates, Jinja2, '
94 'skeleton, scaffolding, project directory, setup.py, package, '
95 'packaging'
96 ),
97 )
98
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,10 +3,7 @@
import os
import sys
-try:
- from setuptools import setup
-except ImportError:
- from distutils.core import setup
+from setuptools import setup
version = "1.3.0"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,10 +3,7 @@\n import os\n import sys\n \n-try:\n- from setuptools import setup\n-except ImportError:\n- from distutils.core import setup\n+from setuptools import setup\n \n version = \"1.3.0\"\n", "issue": "Drop distutils support\nI feel it's about time to drop **distutils** support as we rely on [extras_require of setuptools](https://pythonhosted.org/setuptools/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) to handle **yaml** dependencies..\n\nWhat's your opinion on this matter?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nversion = \"1.3.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.1.1'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.cli:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n extras_require={\n ':sys_platform==\"win32\" and python_version==\"2.7\"': [\n 'PyYAML>=3.10'\n ],\n ':sys_platform!=\"win32\" or python_version!=\"2.7\"': [\n 'ruamel.yaml>=0.10.12'\n ]\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]} | 1,490 | 75 |
gh_patches_debug_30753 | rasdani/github-patches | git_diff | web2py__web2py-2194 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stored XSS / Frame injection
In (probably) all versions, including the latest 2.18.5, it is possible to find an example file which is vulnerable to XSS (reflected, stored) and frame injection. (I do not want to publicly disclose the attack vector and the specific file because it is still possible to exploit.)
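The report deliberately omits the affected file, but the accepted fix later in this entry points at the underlying class of problem: JSON produced by `gluon.serializers.json()` can end up embedded in HTML/`<script>` contexts without `&`, `<` and `>` being escaped. A minimal sketch of that style of mitigation follows (the class name matches the patch; the standalone usage is illustrative):

```python
import json


class JSONEncoderForHTML(json.JSONEncoder):
    """Encode JSON so it can be embedded safely inside HTML <script> blocks."""

    def iterencode(self, o, _one_shot=False):
        for chunk in super().iterencode(o, _one_shot):
            # These characters only occur inside JSON string literals, so
            # \uXXXX-escaping them keeps the output valid JSON while making
            # it inert in an HTML context.
            chunk = chunk.replace('&', '\\u0026')
            chunk = chunk.replace('<', '\\u003c')
            chunk = chunk.replace('>', '\\u003e')
            yield chunk


print(json.dumps({'x': '</script><script>alert(1)</script>'}, cls=JSONEncoderForHTML))
```

The actual patch also escapes U+2028/U+2029 (invalid in JavaScript string literals) and makes this encoder the default `cls` for `gluon.serializers.json()`.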
</issue>
<code>
[start of gluon/serializers.py]
1 """
2 This file is part of the web2py Web Framework
3 Copyrighted by Massimo Di Pierro <[email protected]>
4 License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
5 """
6 import datetime
7 import decimal
8 from gluon.storage import Storage
9 from gluon.html import TAG, XmlComponent, xmlescape
10 from gluon.languages import lazyT
11 import gluon.contrib.rss2 as rss2
12 import json as json_parser
13 from gluon._compat import long, to_native, unicodeT, integer_types
14
15 have_yaml = True
16 try:
17 import yaml as yamlib
18 except ImportError:
19 have_yaml = False
20
21
22 def cast_keys(o, cast=str, encoding="utf-8"):
23 """
24 Builds a new object with <cast> type keys.
25 Use this function if you are in Python < 2.6.5
26 This avoids syntax errors when unpacking dictionary arguments.
27
28 Args:
29 o: is the object input
30 cast: (defaults to str) is an object type or function
31 which supports conversion such as:
32
33 converted = cast(o)
34
35 encoding: (defaults to utf-8) is the encoding for unicode
36 keys. This is not used for custom cast functions
37
38 """
39
40 if isinstance(o, (dict, Storage)):
41 if isinstance(o, dict):
42 newobj = dict()
43 else:
44 newobj = Storage()
45 for k, v in o.items():
46 if (cast == str) and isinstance(k, unicodeT):
47 key = k.encode(encoding)
48 else:
49 key = cast(k)
50 newobj[key] = cast_keys(v, cast=cast, encoding=encoding)
51 elif isinstance(o, (tuple, set, list)):
52 newobj = []
53 for item in o:
54 newobj.append(cast_keys(item, cast=cast, encoding=encoding))
55 if isinstance(o, tuple):
56 newobj = tuple(newobj)
57 elif isinstance(o, set):
58 newobj = set(newobj)
59 else:
60 # no string cast (unknown object)
61 newobj = o
62 return newobj
63
64
65 def loads_json(o, unicode_keys=True, **kwargs):
66 # deserialize a json string
67 result = json_parser.loads(o, **kwargs)
68 if not unicode_keys:
69 # filter non-str keys in dictionary objects
70 result = cast_keys(result,
71 encoding=kwargs.get("encoding", "utf-8"))
72 return result
73
74
75 def custom_json(o):
76 if hasattr(o, 'custom_json') and callable(o.custom_json):
77 return o.custom_json()
78 if isinstance(o, (datetime.date,
79 datetime.datetime,
80 datetime.time)):
81 return o.isoformat()[:19].replace('T', ' ')
82 elif isinstance(o, integer_types):
83 return int(o)
84 elif isinstance(o, decimal.Decimal):
85 return str(o)
86 elif isinstance(o, (bytes, bytearray)):
87 return str(o)
88 elif isinstance(o, lazyT):
89 return str(o)
90 elif isinstance(o, XmlComponent):
91 return to_native(o.xml())
92 elif isinstance(o, set):
93 return list(o)
94 elif hasattr(o, 'as_list') and callable(o.as_list):
95 return o.as_list()
96 elif hasattr(o, 'as_dict') and callable(o.as_dict):
97 return o.as_dict()
98 else:
99 raise TypeError(repr(o) + " is not JSON serializable")
100
101
102 def xml_rec(value, key, quote=True):
103 if hasattr(value, 'custom_xml') and callable(value.custom_xml):
104 return value.custom_xml()
105 elif isinstance(value, (dict, Storage)):
106 return TAG[key](*[TAG[k](xml_rec(v, '', quote))
107 for k, v in value.items()])
108 elif isinstance(value, list):
109 return TAG[key](*[TAG.item(xml_rec(item, '', quote)) for item in value])
110 elif hasattr(value, 'as_list') and callable(value.as_list):
111 return str(xml_rec(value.as_list(), '', quote))
112 elif hasattr(value, 'as_dict') and callable(value.as_dict):
113 return str(xml_rec(value.as_dict(), '', quote))
114 else:
115 return xmlescape(value, quote)
116
117
118 def xml(value, encoding='UTF-8', key='document', quote=True):
119 return ('<?xml version="1.0" encoding="%s"?>' % encoding) + str(xml_rec(value, key, quote))
120
121
122 def json(value, default=custom_json, indent=None, sort_keys=False):
123 value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)
124 # replace JavaScript incompatible spacing
125 # http://timelessrepo.com/json-isnt-a-javascript-subset
126 # PY3 FIXME
127 # return value.replace(ur'\u2028', '\\u2028').replace(ur'\2029', '\\u2029')
128 return value
129
130 def csv(value):
131 return ''
132
133
134 def ics(events, title=None, link=None, timeshift=0, calname=True,
135 **ignored):
136 title = title or '(unknown)'
137 if link and not callable(link):
138 link = lambda item, prefix=link: prefix.replace(
139 '[id]', str(item['id']))
140 s = 'BEGIN:VCALENDAR'
141 s += '\nVERSION:2.0'
142 if not calname is False:
143 s += '\nX-WR-CALNAME:%s' % (calname or title)
144 s += '\nSUMMARY:%s' % title
145 s += '\nPRODID:Generated by web2py'
146 s += '\nCALSCALE:GREGORIAN'
147 s += '\nMETHOD:PUBLISH'
148 for item in events:
149 s += '\nBEGIN:VEVENT'
150 s += '\nUID:%s' % item['id']
151 if link:
152 s += '\nURL:%s' % link(item)
153 shift = datetime.timedelta(seconds=3600 * timeshift)
154 start = item['start_datetime'] + shift
155 stop = item['stop_datetime'] + shift
156 s += '\nDTSTART:%s' % start.strftime('%Y%m%dT%H%M%S')
157 s += '\nDTEND:%s' % stop.strftime('%Y%m%dT%H%M%S')
158 s += '\nSUMMARY:%s' % item['title']
159 s += '\nEND:VEVENT'
160 s += '\nEND:VCALENDAR'
161 return s
162
163 def safe_encode(text):
164 if not isinstance(text, (str, unicodeT)):
165 text = str(text)
166 try:
167 text = text.encode('utf8','replace')
168 except ValueError:
169 new_text = ''
170 for c in text:
171 try:
172 new_text += c.encode('utf8')
173 except:
174 new_text += '?'
175 text = new_text
176 return text
177
178 def rss(feed):
179 if not 'entries' in feed and 'items' in feed:
180 feed['entries'] = feed['items']
181
182 def safestr(obj, key, default=''):
183 return safe_encode(obj.get(key,''))
184
185 now = datetime.datetime.now()
186 rss = rss2.RSS2(title=safestr(feed,'title'),
187 link=safestr(feed,'link'),
188 description=safestr(feed,'description'),
189 lastBuildDate=feed.get('created_on', now),
190 items=[rss2.RSSItem(
191 title=safestr(entry,'title','(notitle)'),
192 link=safestr(entry,'link'),
193 description=safestr(entry,'description'),
194 pubDate=entry.get('created_on', now)
195 ) for entry in feed.get('entries', [])])
196 return rss.to_xml(encoding='utf8')
197
198
199 def yaml(data):
200 if have_yaml:
201 return yamlib.dump(data)
202 else:
203 raise ImportError("No YAML serializer available")
204
205
206 def loads_yaml(data):
207 if have_yaml:
208 return yamlib.load(data)
209 else:
210 raise ImportError("No YAML serializer available")
211
[end of gluon/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gluon/serializers.py b/gluon/serializers.py
--- a/gluon/serializers.py
+++ b/gluon/serializers.py
@@ -119,13 +119,43 @@
return ('<?xml version="1.0" encoding="%s"?>' % encoding) + str(xml_rec(value, key, quote))
-def json(value, default=custom_json, indent=None, sort_keys=False):
- value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)
- # replace JavaScript incompatible spacing
- # http://timelessrepo.com/json-isnt-a-javascript-subset
- # PY3 FIXME
- # return value.replace(ur'\u2028', '\\u2028').replace(ur'\2029', '\\u2029')
- return value
+class JSONEncoderForHTML(json_parser.JSONEncoder):
+ """An encoder that produces JSON safe to embed in HTML.
+ To embed JSON content in, say, a script tag on a web page, the
+ characters &, < and > should be escaped. They cannot be escaped
+ with the usual entities (e.g. &) because they are not expanded
+ within <script> tags.
+ This class also escapes the line separator and paragraph separator
+ characters U+2028 and U+2029, irrespective of the ensure_ascii setting,
+ as these characters are not valid in JavaScript strings (see
+ http://timelessrepo.com/json-isnt-a-javascript-subset).
+ """
+
+ def encode(self, o):
+ # Override JSONEncoder.encode because it has hacks for
+ # performance that make things more complicated.
+ chunks = self.iterencode(o, True)
+ if self.ensure_ascii:
+ return ''.join(chunks)
+ else:
+ return u''.join(chunks)
+
+ def iterencode(self, o, _one_shot=False):
+ chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
+ for chunk in chunks:
+ chunk = chunk.replace('&', '\\u0026')
+ chunk = chunk.replace('<', '\\u003c')
+ chunk = chunk.replace('>', '\\u003e')
+
+ if not self.ensure_ascii:
+ chunk = chunk.replace(u'\u2028', '\\u2028')
+ chunk = chunk.replace(u'\u2029', '\\u2029')
+
+ yield chunk
+
+
+def json(value, default=custom_json, indent=None, sort_keys=False, cls=JSONEncoderForHTML):
+ return json_parser.dumps(value, default=default, cls=cls, sort_keys=sort_keys, indent=indent)
def csv(value):
return ''
| {"golden_diff": "diff --git a/gluon/serializers.py b/gluon/serializers.py\n--- a/gluon/serializers.py\n+++ b/gluon/serializers.py\n@@ -119,13 +119,43 @@\n return ('<?xml version=\"1.0\" encoding=\"%s\"?>' % encoding) + str(xml_rec(value, key, quote))\n \n \n-def json(value, default=custom_json, indent=None, sort_keys=False):\n- value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)\n- # replace JavaScript incompatible spacing\n- # http://timelessrepo.com/json-isnt-a-javascript-subset\n- # PY3 FIXME\n- # return value.replace(ur'\\u2028', '\\\\u2028').replace(ur'\\2029', '\\\\u2029')\n- return value\n+class JSONEncoderForHTML(json_parser.JSONEncoder):\n+ \"\"\"An encoder that produces JSON safe to embed in HTML.\n+ To embed JSON content in, say, a script tag on a web page, the\n+ characters &, < and > should be escaped. They cannot be escaped\n+ with the usual entities (e.g. &) because they are not expanded\n+ within <script> tags.\n+ This class also escapes the line separator and paragraph separator\n+ characters U+2028 and U+2029, irrespective of the ensure_ascii setting,\n+ as these characters are not valid in JavaScript strings (see\n+ http://timelessrepo.com/json-isnt-a-javascript-subset).\n+ \"\"\"\n+\n+ def encode(self, o):\n+ # Override JSONEncoder.encode because it has hacks for\n+ # performance that make things more complicated.\n+ chunks = self.iterencode(o, True)\n+ if self.ensure_ascii:\n+ return ''.join(chunks)\n+ else:\n+ return u''.join(chunks)\n+\n+ def iterencode(self, o, _one_shot=False):\n+ chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)\n+ for chunk in chunks:\n+ chunk = chunk.replace('&', '\\\\u0026')\n+ chunk = chunk.replace('<', '\\\\u003c')\n+ chunk = chunk.replace('>', '\\\\u003e')\n+\n+ if not self.ensure_ascii:\n+ chunk = chunk.replace(u'\\u2028', '\\\\u2028')\n+ chunk = chunk.replace(u'\\u2029', '\\\\u2029')\n+\n+ yield chunk\n+\n+\n+def json(value, default=custom_json, indent=None, sort_keys=False, cls=JSONEncoderForHTML):\n+ return json_parser.dumps(value, default=default, cls=cls, sort_keys=sort_keys, indent=indent)\n \n def csv(value):\n return ''\n", "issue": "Stored XSS / Frame injection\nIn (probably) all versions including latest 2.18.5 is it possible to find one example file which is vulnerable to XSS(reflected, stored) and frame injection. (I do not want to publicly disclose attack vector and specific file because is it still possible to exploit)\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis file is part of the web2py Web Framework\nCopyrighted by Massimo Di Pierro <[email protected]>\nLicense: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\"\"\"\nimport datetime\nimport decimal\nfrom gluon.storage import Storage\nfrom gluon.html import TAG, XmlComponent, xmlescape\nfrom gluon.languages import lazyT\nimport gluon.contrib.rss2 as rss2\nimport json as json_parser\nfrom gluon._compat import long, to_native, unicodeT, integer_types\n\nhave_yaml = True\ntry:\n import yaml as yamlib\nexcept ImportError:\n have_yaml = False\n\n\ndef cast_keys(o, cast=str, encoding=\"utf-8\"):\n \"\"\"\n Builds a new object with <cast> type keys.\n Use this function if you are in Python < 2.6.5\n This avoids syntax errors when unpacking dictionary arguments.\n\n Args:\n o: is the object input\n cast: (defaults to str) is an object type or function\n which supports conversion such as:\n\n converted = cast(o)\n\n encoding: (defaults to utf-8) is the encoding for unicode\n keys. 
This is not used for custom cast functions\n\n \"\"\"\n\n if isinstance(o, (dict, Storage)):\n if isinstance(o, dict):\n newobj = dict()\n else:\n newobj = Storage()\n for k, v in o.items():\n if (cast == str) and isinstance(k, unicodeT):\n key = k.encode(encoding)\n else:\n key = cast(k)\n newobj[key] = cast_keys(v, cast=cast, encoding=encoding)\n elif isinstance(o, (tuple, set, list)):\n newobj = []\n for item in o:\n newobj.append(cast_keys(item, cast=cast, encoding=encoding))\n if isinstance(o, tuple):\n newobj = tuple(newobj)\n elif isinstance(o, set):\n newobj = set(newobj)\n else:\n # no string cast (unknown object)\n newobj = o\n return newobj\n\n\ndef loads_json(o, unicode_keys=True, **kwargs):\n # deserialize a json string\n result = json_parser.loads(o, **kwargs)\n if not unicode_keys:\n # filter non-str keys in dictionary objects\n result = cast_keys(result,\n encoding=kwargs.get(\"encoding\", \"utf-8\"))\n return result\n\n\ndef custom_json(o):\n if hasattr(o, 'custom_json') and callable(o.custom_json):\n return o.custom_json()\n if isinstance(o, (datetime.date,\n datetime.datetime,\n datetime.time)):\n return o.isoformat()[:19].replace('T', ' ')\n elif isinstance(o, integer_types):\n return int(o)\n elif isinstance(o, decimal.Decimal):\n return str(o)\n elif isinstance(o, (bytes, bytearray)):\n return str(o)\n elif isinstance(o, lazyT):\n return str(o)\n elif isinstance(o, XmlComponent):\n return to_native(o.xml())\n elif isinstance(o, set):\n return list(o)\n elif hasattr(o, 'as_list') and callable(o.as_list):\n return o.as_list()\n elif hasattr(o, 'as_dict') and callable(o.as_dict):\n return o.as_dict()\n else:\n raise TypeError(repr(o) + \" is not JSON serializable\")\n\n\ndef xml_rec(value, key, quote=True):\n if hasattr(value, 'custom_xml') and callable(value.custom_xml):\n return value.custom_xml()\n elif isinstance(value, (dict, Storage)):\n return TAG[key](*[TAG[k](xml_rec(v, '', quote))\n for k, v in value.items()])\n elif isinstance(value, list):\n return TAG[key](*[TAG.item(xml_rec(item, '', quote)) for item in value])\n elif hasattr(value, 'as_list') and callable(value.as_list):\n return str(xml_rec(value.as_list(), '', quote))\n elif hasattr(value, 'as_dict') and callable(value.as_dict):\n return str(xml_rec(value.as_dict(), '', quote))\n else:\n return xmlescape(value, quote)\n\n\ndef xml(value, encoding='UTF-8', key='document', quote=True):\n return ('<?xml version=\"1.0\" encoding=\"%s\"?>' % encoding) + str(xml_rec(value, key, quote))\n\n\ndef json(value, default=custom_json, indent=None, sort_keys=False):\n value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)\n # replace JavaScript incompatible spacing\n # http://timelessrepo.com/json-isnt-a-javascript-subset\n # PY3 FIXME\n # return value.replace(ur'\\u2028', '\\\\u2028').replace(ur'\\2029', '\\\\u2029')\n return value\n\ndef csv(value):\n return ''\n\n\ndef ics(events, title=None, link=None, timeshift=0, calname=True,\n **ignored):\n title = title or '(unknown)'\n if link and not callable(link):\n link = lambda item, prefix=link: prefix.replace(\n '[id]', str(item['id']))\n s = 'BEGIN:VCALENDAR'\n s += '\\nVERSION:2.0'\n if not calname is False:\n s += '\\nX-WR-CALNAME:%s' % (calname or title)\n s += '\\nSUMMARY:%s' % title\n s += '\\nPRODID:Generated by web2py'\n s += '\\nCALSCALE:GREGORIAN'\n s += '\\nMETHOD:PUBLISH'\n for item in events:\n s += '\\nBEGIN:VEVENT'\n s += '\\nUID:%s' % item['id']\n if link:\n s += '\\nURL:%s' % link(item)\n shift = 
datetime.timedelta(seconds=3600 * timeshift)\n start = item['start_datetime'] + shift\n stop = item['stop_datetime'] + shift\n s += '\\nDTSTART:%s' % start.strftime('%Y%m%dT%H%M%S')\n s += '\\nDTEND:%s' % stop.strftime('%Y%m%dT%H%M%S')\n s += '\\nSUMMARY:%s' % item['title']\n s += '\\nEND:VEVENT'\n s += '\\nEND:VCALENDAR'\n return s\n\ndef safe_encode(text):\n if not isinstance(text, (str, unicodeT)):\n text = str(text)\n try:\n text = text.encode('utf8','replace')\n except ValueError:\n new_text = ''\n for c in text:\n try:\n new_text += c.encode('utf8')\n except:\n new_text += '?'\n text = new_text\n return text\n\ndef rss(feed):\n if not 'entries' in feed and 'items' in feed:\n feed['entries'] = feed['items']\n\n def safestr(obj, key, default=''):\n return safe_encode(obj.get(key,''))\n\n now = datetime.datetime.now()\n rss = rss2.RSS2(title=safestr(feed,'title'),\n link=safestr(feed,'link'),\n description=safestr(feed,'description'),\n lastBuildDate=feed.get('created_on', now),\n items=[rss2.RSSItem(\n title=safestr(entry,'title','(notitle)'),\n link=safestr(entry,'link'),\n description=safestr(entry,'description'),\n pubDate=entry.get('created_on', now)\n ) for entry in feed.get('entries', [])])\n return rss.to_xml(encoding='utf8')\n\n\ndef yaml(data):\n if have_yaml:\n return yamlib.dump(data)\n else:\n raise ImportError(\"No YAML serializer available\")\n\n\ndef loads_yaml(data):\n if have_yaml:\n return yamlib.load(data)\n else:\n raise ImportError(\"No YAML serializer available\")\n", "path": "gluon/serializers.py"}]} | 2,846 | 632 |
gh_patches_debug_32637 | rasdani/github-patches | git_diff | searxng__searxng-136 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
/preferences: use simple layout for the oscar theme
Suggestion: change how the options are displayed in the /preferences page in the oscar theme:
* General
* Default categories
* Search languages
* Autocomplete
* SafeSearch
* Open Access DOI rewrite (plugin)
* Open Access DOI resolver
* Engines
* User Interface
* Interface language
* Themes
* Results on new tabs
* Infinite scroll (plugin)
* Search on category select (plugin)
* Hotkeys (plugins)
* Privacy
* HTTP method
* Image proxy
* Tracker URL remover (plugin)
* Cookies
* Query syntax (new tab, a mix between documentation and auto documentation from the current settings)
* Documentation about the query syntax (bang, external bangs, ...)
* Answers (list of configured answers)
* Special engines: currency, translations (list the engines that ).
* External bangs
</issue>
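The patch at the end of this entry tackles the proposed "Query syntax" tab by letting each plugin declare which preferences section it belongs to and which query keywords it answers. A sketch of what a plugin module would then look like (the `preference_section`/`query_keywords`/`query_examples` attribute names match the patch; the plugin itself is made up for illustration):

```python
# example_plugin.py: illustrative searx plugin exposing preferences/query metadata.
from flask_babel import gettext

name = gettext('Example plugin')
description = gettext('Answers queries of the form "example <text>".')
default_on = False

# Metadata the preferences page can group on.
preference_section = 'query'      # listed under the "Query syntax" section
query_keywords = ['example']      # keywords this plugin reacts to
query_examples = 'example 42'     # sample query shown to users


def post_search(request, search):
    query = search.search_query.query
    if search.search_query.pageno == 1 and query.startswith('example '):
        search.result_container.answers['example'] = {'answer': query[len('example '):]}
    return True
```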
<code>
[start of searx/plugins/hash_plugin.py]
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 (C) 2018, 2020 by Vaclav Zouzalik
17 '''
18
19 from flask_babel import gettext
20 import hashlib
21 import re
22
23 name = "Hash plugin"
24 description = gettext("Converts strings to different hash digests.")
25 default_on = True
26
27 parser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)
28
29
30 def post_search(request, search):
31 # process only on first page
32 if search.search_query.pageno > 1:
33 return True
34 m = parser_re.match(search.search_query.query)
35 if not m:
36 # wrong query
37 return True
38
39 function, string = m.groups()
40 if string.strip().__len__() == 0:
41 # end if the string is empty
42 return True
43
44 # select hash function
45 f = hashlib.new(function.lower())
46
47 # make digest from the given string
48 f.update(string.encode('utf-8').strip())
49 answer = function + " " + gettext('hash digest') + ": " + f.hexdigest()
50
51 # print result
52 search.result_container.answers.clear()
53 search.result_container.answers['hash'] = {'answer': answer}
54 return True
55
[end of searx/plugins/hash_plugin.py]
[start of searx/plugins/__init__.py]
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 '''
17
18 from hashlib import sha256
19 from importlib import import_module
20 from os import listdir, makedirs, remove, stat, utime
21 from os.path import abspath, basename, dirname, exists, join
22 from shutil import copyfile
23
24 from searx import logger, settings
25
26
27 logger = logger.getChild('plugins')
28
29 from searx.plugins import (oa_doi_rewrite,
30 ahmia_filter,
31 hash_plugin,
32 infinite_scroll,
33 self_info,
34 search_on_category_select,
35 tracker_url_remover,
36 vim_hotkeys)
37
38 required_attrs = (('name', str),
39 ('description', str),
40 ('default_on', bool))
41
42 optional_attrs = (('js_dependencies', tuple),
43 ('css_dependencies', tuple))
44
45
46 class Plugin():
47 default_on = False
48 name = 'Default plugin'
49 description = 'Default plugin description'
50
51
52 class PluginStore():
53
54 def __init__(self):
55 self.plugins = []
56
57 def __iter__(self):
58 for plugin in self.plugins:
59 yield plugin
60
61 def register(self, *plugins, external=False):
62 if external:
63 plugins = load_external_plugins(plugins)
64 for plugin in plugins:
65 for plugin_attr, plugin_attr_type in required_attrs:
66 if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
67 logger.critical('missing attribute "{0}", cannot load plugin: {1}'.format(plugin_attr, plugin))
68 exit(3)
69 for plugin_attr, plugin_attr_type in optional_attrs:
70 if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
71 setattr(plugin, plugin_attr, plugin_attr_type())
72 plugin.id = plugin.name.replace(' ', '_')
73 self.plugins.append(plugin)
74
75 def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):
76 ret = True
77 for plugin in ordered_plugin_list:
78 if hasattr(plugin, plugin_type):
79 ret = getattr(plugin, plugin_type)(request, *args, **kwargs)
80 if not ret:
81 break
82
83 return ret
84
85
86 def load_external_plugins(plugin_names):
87 plugins = []
88 for name in plugin_names:
89 logger.debug('loading plugin: {0}'.format(name))
90 try:
91 pkg = import_module(name)
92 except Exception as e:
93 logger.critical('failed to load plugin module {0}: {1}'.format(name, e))
94 exit(3)
95
96 pkg.__base_path = dirname(abspath(pkg.__file__))
97
98 prepare_package_resources(pkg, name)
99
100 plugins.append(pkg)
101 logger.debug('plugin "{0}" loaded'.format(name))
102 return plugins
103
104
105 def sync_resource(base_path, resource_path, name, target_dir, plugin_dir):
106 dep_path = join(base_path, resource_path)
107 file_name = basename(dep_path)
108 resource_path = join(target_dir, file_name)
109 if not exists(resource_path) or sha_sum(dep_path) != sha_sum(resource_path):
110 try:
111 copyfile(dep_path, resource_path)
112 # copy atime_ns and mtime_ns, so the weak ETags (generated by
113 # the HTTP server) do not change
114 dep_stat = stat(dep_path)
115 utime(resource_path, ns=(dep_stat.st_atime_ns, dep_stat.st_mtime_ns))
116 except:
117 logger.critical('failed to copy plugin resource {0} for plugin {1}'.format(file_name, name))
118 exit(3)
119
120 # returning with the web path of the resource
121 return join('plugins/external_plugins', plugin_dir, file_name)
122
123
124 def prepare_package_resources(pkg, name):
125 plugin_dir = 'plugin_' + name
126 target_dir = join(settings['ui']['static_path'], 'plugins/external_plugins', plugin_dir)
127 try:
128 makedirs(target_dir, exist_ok=True)
129 except:
130 logger.critical('failed to create resource directory {0} for plugin {1}'.format(target_dir, name))
131 exit(3)
132
133 resources = []
134
135 if hasattr(pkg, 'js_dependencies'):
136 resources.extend(map(basename, pkg.js_dependencies))
137 pkg.js_dependencies = tuple([
138 sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)
139 for x in pkg.js_dependencies
140 ])
141 if hasattr(pkg, 'css_dependencies'):
142 resources.extend(map(basename, pkg.css_dependencies))
143 pkg.css_dependencies = tuple([
144 sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)
145 for x in pkg.css_dependencies
146 ])
147
148 for f in listdir(target_dir):
149 if basename(f) not in resources:
150 resource_path = join(target_dir, basename(f))
151 try:
152 remove(resource_path)
153 except:
154 logger.critical('failed to remove unused resource file {0} for plugin {1}'.format(resource_path, name))
155 exit(3)
156
157
158 def sha_sum(filename):
159 with open(filename, "rb") as f:
160 file_content_bytes = f.read()
161 return sha256(file_content_bytes).hexdigest()
162
163
164 plugins = PluginStore()
165 plugins.register(oa_doi_rewrite)
166 plugins.register(hash_plugin)
167 plugins.register(infinite_scroll)
168 plugins.register(self_info)
169 plugins.register(search_on_category_select)
170 plugins.register(tracker_url_remover)
171 plugins.register(vim_hotkeys)
172 # load external plugins
173 if settings['plugins']:
174 plugins.register(*settings['plugins'], external=True)
175
176 if settings['enabled_plugins']:
177 for plugin in plugins:
178 if plugin.name in settings['enabled_plugins']:
179 plugin.default_on = True
180 else:
181 plugin.default_on = False
182
183 # load tor specific plugins
184 if settings['outgoing']['using_tor_proxy']:
185 plugins.register(ahmia_filter)
186
[end of searx/plugins/__init__.py]
[start of searx/plugins/self_info.py]
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 '''
17 from flask_babel import gettext
18 import re
19 name = gettext('Self Informations')
20 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
21 default_on = True
22
23
24 # Self User Agent regex
25 p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
26
27
28 # attach callback to the post search hook
29 # request: flask request object
30 # ctx: the whole local context of the pre search hook
31 def post_search(request, search):
32 if search.search_query.pageno > 1:
33 return True
34 if search.search_query.query == 'ip':
35 x_forwarded_for = request.headers.getlist("X-Forwarded-For")
36 if x_forwarded_for:
37 ip = x_forwarded_for[0]
38 else:
39 ip = request.remote_addr
40 search.result_container.answers['ip'] = {'answer': ip}
41 elif p.match(search.search_query.query):
42 ua = request.user_agent
43 search.result_container.answers['user-agent'] = {'answer': ua}
44 return True
45
[end of searx/plugins/self_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/plugins/__init__.py b/searx/plugins/__init__.py
--- a/searx/plugins/__init__.py
+++ b/searx/plugins/__init__.py
@@ -70,6 +70,13 @@
if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
setattr(plugin, plugin_attr, plugin_attr_type())
plugin.id = plugin.name.replace(' ', '_')
+ if not hasattr(plugin, 'preference_section'):
+ plugin.preference_section = 'general'
+ if plugin.preference_section == 'query':
+ for plugin_attr in ('query_keywords', 'query_examples'):
+ if not hasattr(plugin, plugin_attr):
+ logger.critical('missing attribute "{0}", cannot load plugin: {1}'.format(plugin_attr, plugin))
+ exit(3)
self.plugins.append(plugin)
def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):
diff --git a/searx/plugins/hash_plugin.py b/searx/plugins/hash_plugin.py
--- a/searx/plugins/hash_plugin.py
+++ b/searx/plugins/hash_plugin.py
@@ -23,6 +23,9 @@
name = "Hash plugin"
description = gettext("Converts strings to different hash digests.")
default_on = True
+preference_section = 'query'
+query_keywords = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
+query_examples = 'sha512 The quick brown fox jumps over the lazy dog'
parser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)
diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py
--- a/searx/plugins/self_info.py
+++ b/searx/plugins/self_info.py
@@ -19,7 +19,9 @@
name = gettext('Self Informations')
description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
default_on = True
-
+preference_section = 'query'
+query_keywords = ['user-agent']
+query_examples = ''
# Self User Agent regex
p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
| {"golden_diff": "diff --git a/searx/plugins/__init__.py b/searx/plugins/__init__.py\n--- a/searx/plugins/__init__.py\n+++ b/searx/plugins/__init__.py\n@@ -70,6 +70,13 @@\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n setattr(plugin, plugin_attr, plugin_attr_type())\n plugin.id = plugin.name.replace(' ', '_')\n+ if not hasattr(plugin, 'preference_section'):\n+ plugin.preference_section = 'general'\n+ if plugin.preference_section == 'query':\n+ for plugin_attr in ('query_keywords', 'query_examples'):\n+ if not hasattr(plugin, plugin_attr):\n+ logger.critical('missing attribute \"{0}\", cannot load plugin: {1}'.format(plugin_attr, plugin))\n+ exit(3)\n self.plugins.append(plugin)\n \n def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):\ndiff --git a/searx/plugins/hash_plugin.py b/searx/plugins/hash_plugin.py\n--- a/searx/plugins/hash_plugin.py\n+++ b/searx/plugins/hash_plugin.py\n@@ -23,6 +23,9 @@\n name = \"Hash plugin\"\n description = gettext(\"Converts strings to different hash digests.\")\n default_on = True\n+preference_section = 'query'\n+query_keywords = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']\n+query_examples = 'sha512 The quick brown fox jumps over the lazy dog'\n \n parser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)\n \ndiff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py\n--- a/searx/plugins/self_info.py\n+++ b/searx/plugins/self_info.py\n@@ -19,7 +19,9 @@\n name = gettext('Self Informations')\n description = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\n default_on = True\n-\n+preference_section = 'query'\n+query_keywords = ['user-agent']\n+query_examples = ''\n \n # Self User Agent regex\n p = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n", "issue": "/preferences: use simple layout for the oscar theme\nSuggestion: change how the options are displayed in the /preferences page in the oscar theme :\r\n* General\r\n * Default categories\r\n * Search languages\r\n * Autocomplete\r\n * SafeSearch\r\n * Open Access DOI rewrite (plugin)\r\n * Open Access DOI resolver\r\n* Engines\r\n* User Interface\r\n * Interface language\r\n * Themes\r\n * Results on new tabs\r\n * Infinite scroll (plugin)\r\n * Search on category select (plugin)\r\n * Hotkeys (plugins)\r\n* Privacy\r\n * HTTP method\r\n * Image proxy\r\n * Tracker URL remover (plugin)\r\n* Cookies\r\n* Query syntax (new tab, a mix between documentation and auto documentation from the current settings)\r\n * Documentation about the query syntax (bang, external bangs, ...)\r\n * Answers (list of configured answers)\r\n * Special engines: currency, translations (list the engines that ).\r\n * External bangs\r\n\n/preferences: use simple layout for the oscar theme\nSuggestion: change how the options are displayed in the /preferences page in the oscar theme :\r\n* General\r\n * Default categories\r\n * Search languages\r\n * Autocomplete\r\n * SafeSearch\r\n * Open Access DOI rewrite (plugin)\r\n * Open Access DOI resolver\r\n* Engines\r\n* User Interface\r\n * Interface language\r\n * Themes\r\n * Results on new tabs\r\n * Infinite scroll (plugin)\r\n * Search on category select (plugin)\r\n * Hotkeys (plugins)\r\n* Privacy\r\n * HTTP method\r\n * Image proxy\r\n * Tracker URL remover (plugin)\r\n* Cookies\r\n* Query syntax (new tab, a mix between documentation and auto documentation from the current settings)\r\n * Documentation 
about the query syntax (bang, external bangs, ...)\r\n * Answers (list of configured answers)\r\n * Special engines: currency, translations (list the engines that ).\r\n * External bangs\r\n\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n(C) 2018, 2020 by Vaclav Zouzalik\n'''\n\nfrom flask_babel import gettext\nimport hashlib\nimport re\n\nname = \"Hash plugin\"\ndescription = gettext(\"Converts strings to different hash digests.\")\ndefault_on = True\n\nparser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)\n\n\ndef post_search(request, search):\n # process only on first page\n if search.search_query.pageno > 1:\n return True\n m = parser_re.match(search.search_query.query)\n if not m:\n # wrong query\n return True\n\n function, string = m.groups()\n if string.strip().__len__() == 0:\n # end if the string is empty\n return True\n\n # select hash function\n f = hashlib.new(function.lower())\n\n # make digest from the given string\n f.update(string.encode('utf-8').strip())\n answer = function + \" \" + gettext('hash digest') + \": \" + f.hexdigest()\n\n # print result\n search.result_container.answers.clear()\n search.result_container.answers['hash'] = {'answer': answer}\n return True\n", "path": "searx/plugins/hash_plugin.py"}, {"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\n\nfrom hashlib import sha256\nfrom importlib import import_module\nfrom os import listdir, makedirs, remove, stat, utime\nfrom os.path import abspath, basename, dirname, exists, join\nfrom shutil import copyfile\n\nfrom searx import logger, settings\n\n\nlogger = logger.getChild('plugins')\n\nfrom searx.plugins import (oa_doi_rewrite,\n ahmia_filter,\n hash_plugin,\n infinite_scroll,\n self_info,\n search_on_category_select,\n tracker_url_remover,\n vim_hotkeys)\n\nrequired_attrs = (('name', str),\n ('description', str),\n ('default_on', bool))\n\noptional_attrs = (('js_dependencies', tuple),\n ('css_dependencies', tuple))\n\n\nclass Plugin():\n default_on = False\n name = 'Default plugin'\n description = 'Default plugin description'\n\n\nclass PluginStore():\n\n def __init__(self):\n self.plugins = []\n\n def __iter__(self):\n for plugin in self.plugins:\n yield plugin\n\n def register(self, *plugins, external=False):\n if external:\n plugins = load_external_plugins(plugins)\n for plugin in plugins:\n for plugin_attr, plugin_attr_type in required_attrs:\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n logger.critical('missing attribute \"{0}\", cannot load plugin: {1}'.format(plugin_attr, plugin))\n exit(3)\n for plugin_attr, plugin_attr_type in optional_attrs:\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n setattr(plugin, plugin_attr, plugin_attr_type())\n plugin.id = plugin.name.replace(' ', '_')\n self.plugins.append(plugin)\n\n def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):\n ret = True\n for plugin in ordered_plugin_list:\n if hasattr(plugin, plugin_type):\n ret = getattr(plugin, plugin_type)(request, *args, **kwargs)\n if not ret:\n break\n\n return ret\n\n\ndef load_external_plugins(plugin_names):\n plugins = []\n for name in plugin_names:\n logger.debug('loading plugin: {0}'.format(name))\n try:\n pkg = import_module(name)\n except Exception as e:\n logger.critical('failed to load plugin module {0}: {1}'.format(name, e))\n exit(3)\n\n pkg.__base_path = dirname(abspath(pkg.__file__))\n\n prepare_package_resources(pkg, name)\n\n plugins.append(pkg)\n logger.debug('plugin \"{0}\" loaded'.format(name))\n return plugins\n\n\ndef sync_resource(base_path, resource_path, name, target_dir, plugin_dir):\n dep_path = join(base_path, resource_path)\n file_name = basename(dep_path)\n resource_path = join(target_dir, file_name)\n if not exists(resource_path) or sha_sum(dep_path) != sha_sum(resource_path):\n try:\n copyfile(dep_path, resource_path)\n # copy atime_ns and mtime_ns, so the weak ETags (generated by\n # the HTTP server) do not change\n dep_stat = stat(dep_path)\n utime(resource_path, ns=(dep_stat.st_atime_ns, dep_stat.st_mtime_ns))\n except:\n logger.critical('failed to copy plugin resource {0} for plugin {1}'.format(file_name, name))\n exit(3)\n\n # returning with the web path of the resource\n return join('plugins/external_plugins', plugin_dir, file_name)\n\n\ndef prepare_package_resources(pkg, name):\n plugin_dir = 'plugin_' + name\n target_dir = join(settings['ui']['static_path'], 'plugins/external_plugins', plugin_dir)\n try:\n makedirs(target_dir, exist_ok=True)\n except:\n logger.critical('failed to create resource directory {0} for plugin {1}'.format(target_dir, name))\n exit(3)\n\n resources = []\n\n if hasattr(pkg, 
'js_dependencies'):\n resources.extend(map(basename, pkg.js_dependencies))\n pkg.js_dependencies = tuple([\n sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)\n for x in pkg.js_dependencies\n ])\n if hasattr(pkg, 'css_dependencies'):\n resources.extend(map(basename, pkg.css_dependencies))\n pkg.css_dependencies = tuple([\n sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)\n for x in pkg.css_dependencies\n ])\n\n for f in listdir(target_dir):\n if basename(f) not in resources:\n resource_path = join(target_dir, basename(f))\n try:\n remove(resource_path)\n except:\n logger.critical('failed to remove unused resource file {0} for plugin {1}'.format(resource_path, name))\n exit(3)\n\n\ndef sha_sum(filename):\n with open(filename, \"rb\") as f:\n file_content_bytes = f.read()\n return sha256(file_content_bytes).hexdigest()\n\n\nplugins = PluginStore()\nplugins.register(oa_doi_rewrite)\nplugins.register(hash_plugin)\nplugins.register(infinite_scroll)\nplugins.register(self_info)\nplugins.register(search_on_category_select)\nplugins.register(tracker_url_remover)\nplugins.register(vim_hotkeys)\n# load external plugins\nif settings['plugins']:\n plugins.register(*settings['plugins'], external=True)\n\nif settings['enabled_plugins']:\n for plugin in plugins:\n if plugin.name in settings['enabled_plugins']:\n plugin.default_on = True\n else:\n plugin.default_on = False\n\n# load tor specific plugins\nif settings['outgoing']['using_tor_proxy']:\n plugins.register(ahmia_filter)\n", "path": "searx/plugins/__init__.py"}, {"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = gettext('Self Informations')\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, search):\n if search.search_query.pageno > 1:\n return True\n if search.search_query.query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n search.result_container.answers['ip'] = {'answer': ip}\n elif p.match(search.search_query.query):\n ua = request.user_agent\n search.result_container.answers['user-agent'] = {'answer': ua}\n return True\n", "path": "searx/plugins/self_info.py"}]} | 3,847 | 531 |
gh_patches_debug_23437 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-845 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rhn_channel is using deprecated methods
Issue Type: Bug Report
Ansible Version: 1.7.2
Environment: OS X 10.9 / RHEL 6/7
Summary:
When adding a child channel to an RHN Satellite server, rhn_channel fails with a 500 error.
Steps To Reproduce:
Attempt to subscribe to a child channel with rhn_channel against a Satellite server 5.4+.
Expected Results:
The server gets subscribed to the child channel.
Actual Results:
xmlrpclib.ProtocolError: <ProtocolError for redhat.example.com/rpc/api: 500 Internal Server Error>
channel.software.setSystemChannels is deprecated.
https://access.redhat.com/documentation/en-US/Red_Hat_Network_Satellite/5.5/html/API_Overview/files/html/handlers/ChannelSoftwareHandler.html#setSystemChannels
PR to follow
</issue>
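A minimal sketch of the API change being requested, using the XML-RPC client the module already builds. The replacement handler name is taken from the linked Satellite documentation and should be treated as an assumption rather than a verified interface:

    import xmlrpclib

    # connection details reuse the example values from the module docs and the traceback above
    client = xmlrpclib.Server("https://redhat.example.com/rpc/api", verbose=0)
    session = client.auth.login("rhnuser", "guessme")

    # deprecated handler currently used by the module:
    #   client.channel.software.setSystemChannels(session, system_id, channel_labels)
    # replacement suggested by the Satellite 5.5 API docs:
    #   client.system.setChildChannels(session, system_id, child_channel_labels)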
<code>
[start of packaging/os/rhn_channel.py]
1 #!/usr/bin/python
2
3 # (c) Vincent Van de Kussen
4 #
5 # This file is part of Ansible
6 #
7 # Ansible is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # Ansible is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
19
20 DOCUMENTATION = '''
21 ---
22 module: rhn_channel
23 short_description: Adds or removes Red Hat software channels
24 description:
25 - Adds or removes Red Hat software channels
26 version_added: "1.1"
27 author: Vincent Van der Kussen
28 notes:
29 - this module fetches the system id from RHN.
30 requirements:
31 - none
32 options:
33 name:
34 description:
35 - name of the software channel
36 required: true
37 default: null
38 sysname:
39 description:
40 - name of the system as it is known in RHN/Satellite
41 required: true
42 default: null
43 state:
44 description:
45 - whether the channel should be present or not
46 required: false
47 default: present
48 url:
49 description:
50 - The full url to the RHN/Satellite api
51 required: true
52 user:
53 description:
54 - RHN/Satellite user
55 required: true
56 password:
57 description:
58 - "the user's password"
59 required: true
60 '''
61
62 EXAMPLES = '''
63 - rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme
64 '''
65
66 import xmlrpclib
67 from operator import itemgetter
68 import re
69
70
71 # ------------------------------------------------------- #
72
73 def get_systemid(client, session, sysname):
74 systems = client.system.listUserSystems(session)
75 for system in systems:
76 if system.get('name') == sysname:
77 idres = system.get('id')
78 idd = int(idres)
79 return idd
80
81 # ------------------------------------------------------- #
82
83 # unused:
84 #
85 #def get_localsystemid():
86 # f = open("/etc/sysconfig/rhn/systemid", "r")
87 # content = f.read()
88 # loc_id = re.search(r'\b(ID-)(\d{10})' ,content)
89 # return loc_id.group(2)
90
91 # ------------------------------------------------------- #
92
93 def subscribe_channels(channels, client, session, sysname, sys_id):
94 c = base_channels(client, session, sys_id)
95 c.append(channels)
96 return client.channel.software.setSystemChannels(session, sys_id, c)
97
98 # ------------------------------------------------------- #
99
100 def unsubscribe_channels(channels, client, session, sysname, sys_id):
101 c = base_channels(client, session, sys_id)
102 c.remove(channels)
103 return client.channel.software.setSystemChannels(session, sys_id, c)
104
105 # ------------------------------------------------------- #
106
107 def base_channels(client, session, sys_id):
108 basechan = client.channel.software.listSystemChannels(session, sys_id)
109 try:
110 chans = [item['label'] for item in basechan]
111 except KeyError:
112 chans = [item['channel_label'] for item in basechan]
113 return chans
114
115 # ------------------------------------------------------- #
116
117
118 def main():
119
120 module = AnsibleModule(
121 argument_spec = dict(
122 state = dict(default='present', choices=['present', 'absent']),
123 name = dict(required=True),
124 sysname = dict(required=True),
125 url = dict(required=True),
126 user = dict(required=True),
127 password = dict(required=True, aliases=['pwd']),
128 )
129 # supports_check_mode=True
130 )
131
132 state = module.params['state']
133 channelname = module.params['name']
134 systname = module.params['sysname']
135 saturl = module.params['url']
136 user = module.params['user']
137 password = module.params['password']
138
139 #initialize connection
140 client = xmlrpclib.Server(saturl, verbose=0)
141 session = client.auth.login(user, password)
142
143 # get systemid
144 sys_id = get_systemid(client, session, systname)
145
146 # get channels for system
147 chans = base_channels(client, session, sys_id)
148
149
150 if state == 'present':
151 if channelname in chans:
152 module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
153 else:
154 subscribe_channels(channelname, client, session, systname, sys_id)
155 module.exit_json(changed=True, msg="Channel %s added" % channelname)
156
157 if state == 'absent':
158 if not channelname in chans:
159 module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
160 else:
161 unsubscribe_channels(channelname, client, session, systname, sys_id)
162 module.exit_json(changed=True, msg="Channel %s removed" % channelname)
163
164 client.auth.logout(session)
165
166
167 # import module snippets
168 from ansible.module_utils.basic import *
169 main()
170
[end of packaging/os/rhn_channel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py
--- a/packaging/os/rhn_channel.py
+++ b/packaging/os/rhn_channel.py
@@ -90,17 +90,17 @@
# ------------------------------------------------------- #
-def subscribe_channels(channels, client, session, sysname, sys_id):
- c = base_channels(client, session, sys_id)
- c.append(channels)
- return client.channel.software.setSystemChannels(session, sys_id, c)
+def subscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.append(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
-def unsubscribe_channels(channels, client, session, sysname, sys_id):
- c = base_channels(client, session, sys_id)
- c.remove(channels)
- return client.channel.software.setSystemChannels(session, sys_id, c)
+def unsubscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.remove(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
@@ -167,3 +167,4 @@
# import module snippets
from ansible.module_utils.basic import *
main()
+
| {"golden_diff": "diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py\n--- a/packaging/os/rhn_channel.py\n+++ b/packaging/os/rhn_channel.py\n@@ -90,17 +90,17 @@\n \n # ------------------------------------------------------- #\n \n-def subscribe_channels(channels, client, session, sysname, sys_id):\n- c = base_channels(client, session, sys_id)\n- c.append(channels)\n- return client.channel.software.setSystemChannels(session, sys_id, c)\n+def subscribe_channels(channelname, client, session, sysname, sys_id):\n+ channels = base_channels(client, session, sys_id)\n+ channels.append(channelname)\n+ return client.system.setChildChannels(session, sys_id, channels)\n \n # ------------------------------------------------------- #\n \n-def unsubscribe_channels(channels, client, session, sysname, sys_id):\n- c = base_channels(client, session, sys_id)\n- c.remove(channels)\n- return client.channel.software.setSystemChannels(session, sys_id, c)\n+def unsubscribe_channels(channelname, client, session, sysname, sys_id):\n+ channels = base_channels(client, session, sys_id)\n+ channels.remove(channelname)\n+ return client.system.setChildChannels(session, sys_id, channels)\n \n # ------------------------------------------------------- #\n \n@@ -167,3 +167,4 @@\n # import module snippets\n from ansible.module_utils.basic import *\n main()\n+\n", "issue": "rhn_channel is using depreciated methods\nIssue Type:\nBug Report\n\nAnsible Version: \n1.7.2\n\nEnvironment: \nOS X 10.9 / RHEL 6/7\n\nSummary:\nWhen adding a child channel to rhn sat server rhn_channel fails with 500 error. \n\nSteps To Reproduce:\nattempt to subscribe to a child channel with rhn_channel to sat server 5.4+\n\nExpected Results:\nServer gets subscribed to child channel.\n\nActual Results:\nxmlrpclib.ProtocolError: <ProtocolError for redhat.example.com/rpc/api: 500 Internal Server Error>\n\nchannel.software.setSystemChannels is depreciated. \nhttps://access.redhat.com/documentation/en-US/Red_Hat_Network_Satellite/5.5/html/API_Overview/files/html/handlers/ChannelSoftwareHandler.html#setSystemChannels\n\nPR to follow\n\n", "before_files": [{"content": "#!/usr/bin/python\n\n# (c) Vincent Van de Kussen\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: rhn_channel\nshort_description: Adds or removes Red Hat software channels\ndescription:\n - Adds or removes Red Hat software channels\nversion_added: \"1.1\"\nauthor: Vincent Van der Kussen\nnotes:\n - this module fetches the system id from RHN. 
\nrequirements:\n - none\noptions:\n name:\n description:\n - name of the software channel\n required: true\n default: null\n sysname:\n description:\n - name of the system as it is known in RHN/Satellite\n required: true\n default: null\n state:\n description:\n - whether the channel should be present or not\n required: false\n default: present\n url:\n description: \n - The full url to the RHN/Satellite api\n required: true\n user:\n description:\n - RHN/Satellite user\n required: true\n password:\n description:\n - \"the user's password\"\n required: true\n'''\n\nEXAMPLES = '''\n- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme\n'''\n\nimport xmlrpclib\nfrom operator import itemgetter\nimport re\n\n\n# ------------------------------------------------------- #\n\ndef get_systemid(client, session, sysname):\n systems = client.system.listUserSystems(session)\n for system in systems:\n if system.get('name') == sysname:\n idres = system.get('id')\n idd = int(idres)\n return idd\n\n# ------------------------------------------------------- #\n\n# unused:\n#\n#def get_localsystemid():\n# f = open(\"/etc/sysconfig/rhn/systemid\", \"r\")\n# content = f.read()\n# loc_id = re.search(r'\\b(ID-)(\\d{10})' ,content)\n# return loc_id.group(2)\n\n# ------------------------------------------------------- #\n\ndef subscribe_channels(channels, client, session, sysname, sys_id):\n c = base_channels(client, session, sys_id)\n c.append(channels)\n return client.channel.software.setSystemChannels(session, sys_id, c)\n\n# ------------------------------------------------------- #\n\ndef unsubscribe_channels(channels, client, session, sysname, sys_id):\n c = base_channels(client, session, sys_id)\n c.remove(channels)\n return client.channel.software.setSystemChannels(session, sys_id, c)\n\n# ------------------------------------------------------- #\n\ndef base_channels(client, session, sys_id):\n basechan = client.channel.software.listSystemChannels(session, sys_id)\n try:\n chans = [item['label'] for item in basechan]\n except KeyError:\n chans = [item['channel_label'] for item in basechan]\n return chans\n\n# ------------------------------------------------------- #\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n state = dict(default='present', choices=['present', 'absent']),\n name = dict(required=True),\n sysname = dict(required=True),\n url = dict(required=True),\n user = dict(required=True),\n password = dict(required=True, aliases=['pwd']),\n )\n# supports_check_mode=True\n )\n\n state = module.params['state']\n channelname = module.params['name']\n systname = module.params['sysname']\n saturl = module.params['url']\n user = module.params['user']\n password = module.params['password']\n \n #initialize connection\n client = xmlrpclib.Server(saturl, verbose=0)\n session = client.auth.login(user, password)\n \n # get systemid\n sys_id = get_systemid(client, session, systname)\n\n # get channels for system\n chans = base_channels(client, session, sys_id)\n \n \n if state == 'present':\n if channelname in chans:\n module.exit_json(changed=False, msg=\"Channel %s already exists\" % channelname)\n else:\n subscribe_channels(channelname, client, session, systname, sys_id)\n module.exit_json(changed=True, msg=\"Channel %s added\" % channelname)\n\n if state == 'absent':\n if not channelname in chans:\n module.exit_json(changed=False, msg=\"Not subscribed to channel %s.\" % channelname)\n else:\n 
unsubscribe_channels(channelname, client, session, systname, sys_id)\n module.exit_json(changed=True, msg=\"Channel %s removed\" % channelname)\n\n client.auth.logout(session)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "packaging/os/rhn_channel.py"}]} | 2,310 | 315 |
gh_patches_debug_4966 | rasdani/github-patches | git_diff | getpelican__pelican-1219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CATEGORIES_URL doesn't honor its default value
First of all, thanks for this great software. But when I'm using the variable CATEGORIES_URL in templates (category.html), it doesn't have its default value (as stated in the documentation: CATEGORIES_URL ('categories/')). I need to set this value explicitly in pelican.conf. I'm using Pelican 3.3.0.
Thanks
</issue>
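A minimal illustration of the workaround described above: declaring the values explicitly in the project settings file so the template variable is populated. The exact URL values shown here are assumptions; the report's point is that no default is applied at all:

    # pelicanconf.py
    CATEGORIES_URL = 'categories.html'
    CATEGORIES_SAVE_AS = 'categories.html'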
<code>
[start of pelican/settings.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals, print_function
3 import six
4
5 import copy
6 import inspect
7 import os
8 import locale
9 import logging
10
11 try:
12 # SourceFileLoader is the recommended way in 3.3+
13 from importlib.machinery import SourceFileLoader
14 load_source = lambda name, path: SourceFileLoader(name, path).load_module()
15 except ImportError:
16 # but it does not exist in 3.2-, so fall back to imp
17 import imp
18 load_source = imp.load_source
19
20 from os.path import isabs
21
22
23 logger = logging.getLogger(__name__)
24
25
26 DEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
27 'themes', 'notmyidea')
28 DEFAULT_CONFIG = {
29 'PATH': os.curdir,
30 'ARTICLE_DIR': '',
31 'ARTICLE_EXCLUDES': ('pages',),
32 'PAGE_DIR': 'pages',
33 'PAGE_EXCLUDES': (),
34 'THEME': DEFAULT_THEME,
35 'OUTPUT_PATH': 'output',
36 'READERS': {},
37 'STATIC_PATHS': ['images', ],
38 'THEME_STATIC_DIR': 'theme',
39 'THEME_STATIC_PATHS': ['static', ],
40 'FEED_ALL_ATOM': os.path.join('feeds', 'all.atom.xml'),
41 'CATEGORY_FEED_ATOM': os.path.join('feeds', '%s.atom.xml'),
42 'TRANSLATION_FEED_ATOM': os.path.join('feeds', 'all-%s.atom.xml'),
43 'FEED_MAX_ITEMS': '',
44 'SITEURL': '',
45 'SITENAME': 'A Pelican Blog',
46 'DISPLAY_PAGES_ON_MENU': True,
47 'DISPLAY_CATEGORIES_ON_MENU': True,
48 'OUTPUT_SOURCES': False,
49 'OUTPUT_SOURCES_EXTENSION': '.text',
50 'USE_FOLDER_AS_CATEGORY': True,
51 'DEFAULT_CATEGORY': 'misc',
52 'WITH_FUTURE_DATES': True,
53 'CSS_FILE': 'main.css',
54 'NEWEST_FIRST_ARCHIVES': True,
55 'REVERSE_CATEGORY_ORDER': False,
56 'DELETE_OUTPUT_DIRECTORY': False,
57 'OUTPUT_RETENTION': (),
58 'ARTICLE_URL': '{slug}.html',
59 'ARTICLE_SAVE_AS': '{slug}.html',
60 'ARTICLE_LANG_URL': '{slug}-{lang}.html',
61 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',
62 'PAGE_URL': 'pages/{slug}.html',
63 'PAGE_SAVE_AS': os.path.join('pages', '{slug}.html'),
64 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',
65 'PAGE_LANG_SAVE_AS': os.path.join('pages', '{slug}-{lang}.html'),
66 'STATIC_URL': '{path}',
67 'STATIC_SAVE_AS': '{path}',
68 'PDF_GENERATOR': False,
69 'PDF_STYLE_PATH': '',
70 'PDF_STYLE': 'twelvepoint',
71 'CATEGORY_URL': 'category/{slug}.html',
72 'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),
73 'TAG_URL': 'tag/{slug}.html',
74 'TAG_SAVE_AS': os.path.join('tag', '{slug}.html'),
75 'AUTHOR_URL': 'author/{slug}.html',
76 'AUTHOR_SAVE_AS': os.path.join('author', '{slug}.html'),
77 'PAGINATION_PATTERNS': [
78 (0, '{name}{number}.html', '{name}{number}.html'),
79 ],
80 'YEAR_ARCHIVE_SAVE_AS': False,
81 'MONTH_ARCHIVE_SAVE_AS': False,
82 'DAY_ARCHIVE_SAVE_AS': False,
83 'RELATIVE_URLS': False,
84 'DEFAULT_LANG': 'en',
85 'TAG_CLOUD_STEPS': 4,
86 'TAG_CLOUD_MAX_ITEMS': 100,
87 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'authors', 'archives'),
88 'EXTRA_TEMPLATES_PATHS': [],
89 'PAGINATED_DIRECT_TEMPLATES': ('index', ),
90 'PELICAN_CLASS': 'pelican.Pelican',
91 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',
92 'DATE_FORMATS': {},
93 'ASCIIDOC_OPTIONS': [],
94 'MD_EXTENSIONS': ['codehilite(css_class=highlight)', 'extra'],
95 'JINJA_EXTENSIONS': [],
96 'JINJA_FILTERS': {},
97 'LOCALE': [''], # defaults to user locale
98 'DEFAULT_PAGINATION': False,
99 'DEFAULT_ORPHANS': 0,
100 'DEFAULT_METADATA': (),
101 'FILENAME_METADATA': '(?P<date>\d{4}-\d{2}-\d{2}).*',
102 'PATH_METADATA': '',
103 'EXTRA_PATH_METADATA': {},
104 'DEFAULT_STATUS': 'published',
105 'ARTICLE_PERMALINK_STRUCTURE': '',
106 'TYPOGRIFY': False,
107 'SUMMARY_MAX_LENGTH': 50,
108 'PLUGIN_PATH': '',
109 'PLUGINS': [],
110 'PYGMENTS_RST_OPTIONS': {},
111 'TEMPLATE_PAGES': {},
112 'IGNORE_FILES': ['.#*'],
113 'SLUG_SUBSTITUTIONS': (),
114 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',
115 }
116
117 PYGMENTS_RST_OPTIONS = None
118
119
120 def read_settings(path=None, override=None):
121 if path:
122 local_settings = get_settings_from_file(path)
123 # Make the paths relative to the settings file
124 for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'PLUGIN_PATH']:
125 if p in local_settings and local_settings[p] is not None \
126 and not isabs(local_settings[p]):
127 absp = os.path.abspath(os.path.normpath(os.path.join(
128 os.path.dirname(path), local_settings[p])))
129 if p not in ('THEME', 'PLUGIN_PATH') or os.path.exists(absp):
130 local_settings[p] = absp
131 else:
132 local_settings = copy.deepcopy(DEFAULT_CONFIG)
133
134 if override:
135 local_settings.update(override)
136
137 parsed_settings = configure_settings(local_settings)
138 # This is because there doesn't seem to be a way to pass extra
139 # parameters to docutils directive handlers, so we have to have a
140 # variable here that we'll import from within Pygments.run (see
141 # rstdirectives.py) to see what the user defaults were.
142 global PYGMENTS_RST_OPTIONS
143 PYGMENTS_RST_OPTIONS = parsed_settings.get('PYGMENTS_RST_OPTIONS', None)
144 return parsed_settings
145
146
147 def get_settings_from_module(module=None, default_settings=DEFAULT_CONFIG):
148 """Loads settings from a module, returns a dictionary."""
149
150 context = copy.deepcopy(default_settings)
151 if module is not None:
152 context.update(
153 (k, v) for k, v in inspect.getmembers(module) if k.isupper())
154 return context
155
156
157 def get_settings_from_file(path, default_settings=DEFAULT_CONFIG):
158 """Loads settings from a file path, returning a dict."""
159
160 name, ext = os.path.splitext(os.path.basename(path))
161 module = load_source(name, path)
162 return get_settings_from_module(module, default_settings=default_settings)
163
164
165 def configure_settings(settings):
166 """Provide optimizations, error checking and warnings for the given
167 settings.
168
169 """
170 if not 'PATH' in settings or not os.path.isdir(settings['PATH']):
171 raise Exception('You need to specify a path containing the content'
172 ' (see pelican --help for more information)')
173
174 # lookup the theme in "pelican/themes" if the given one doesn't exist
175 if not os.path.isdir(settings['THEME']):
176 theme_path = os.path.join(
177 os.path.dirname(os.path.abspath(__file__)),
178 'themes',
179 settings['THEME'])
180 if os.path.exists(theme_path):
181 settings['THEME'] = theme_path
182 else:
183 raise Exception("Could not find the theme %s"
184 % settings['THEME'])
185
186 # standardize strings to lowercase strings
187 for key in [
188 'DEFAULT_LANG',
189 ]:
190 if key in settings:
191 settings[key] = settings[key].lower()
192
193 # standardize strings to lists
194 for key in [
195 'LOCALE',
196 ]:
197 if key in settings and isinstance(settings[key], six.string_types):
198 settings[key] = [settings[key]]
199
200 # check settings that must be a particular type
201 for key, types in [
202 ('OUTPUT_SOURCES_EXTENSION', six.string_types),
203 ('FILENAME_METADATA', six.string_types),
204 ]:
205 if key in settings and not isinstance(settings[key], types):
206 value = settings.pop(key)
207 logger.warn(
208 'Detected misconfigured {} ({}), '
209 'falling back to the default ({})'.format(
210 key, value, DEFAULT_CONFIG[key]))
211
212 # try to set the different locales, fallback on the default.
213 locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])
214
215 for locale_ in locales:
216 try:
217 locale.setlocale(locale.LC_ALL, str(locale_))
218 break # break if it is successful
219 except locale.Error:
220 pass
221 else:
222 logger.warning("LOCALE option doesn't contain a correct value")
223
224 if ('SITEURL' in settings):
225 # If SITEURL has a trailing slash, remove it and provide a warning
226 siteurl = settings['SITEURL']
227 if (siteurl.endswith('/')):
228 settings['SITEURL'] = siteurl[:-1]
229 logger.warning("Removed extraneous trailing slash from SITEURL.")
230 # If SITEURL is defined but FEED_DOMAIN isn't,
231 # set FEED_DOMAIN to SITEURL
232 if not 'FEED_DOMAIN' in settings:
233 settings['FEED_DOMAIN'] = settings['SITEURL']
234
235 # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
236 feed_keys = [
237 'FEED_ATOM', 'FEED_RSS',
238 'FEED_ALL_ATOM', 'FEED_ALL_RSS',
239 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',
240 'TAG_FEED_ATOM', 'TAG_FEED_RSS',
241 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',
242 ]
243
244 if any(settings.get(k) for k in feed_keys):
245 if not settings.get('SITEURL'):
246 logger.warning('Feeds generated without SITEURL set properly may'
247 ' not be valid')
248
249 if not 'TIMEZONE' in settings:
250 logger.warning(
251 'No timezone information specified in the settings. Assuming'
252 ' your timezone is UTC for feed generation. Check '
253 'http://docs.getpelican.com/en/latest/settings.html#timezone '
254 'for more information')
255
256 # fix up pagination rules
257 from pelican.paginator import PaginationRule
258 pagination_rules = [
259 PaginationRule(*r) for r in settings.get(
260 'PAGINATION_PATTERNS',
261 DEFAULT_CONFIG['PAGINATION_PATTERNS'],
262 )
263 ]
264 settings['PAGINATION_PATTERNS'] = sorted(
265 pagination_rules,
266 key=lambda r: r[0],
267 )
268
269 # Save people from accidentally setting a string rather than a list
270 path_keys = (
271 'ARTICLE_EXCLUDES',
272 'DEFAULT_METADATA',
273 'DIRECT_TEMPLATES',
274 'EXTRA_TEMPLATES_PATHS',
275 'FILES_TO_COPY',
276 'IGNORE_FILES',
277 'JINJA_EXTENSIONS',
278 'PAGINATED_DIRECT_TEMPLATES',
279 'PLUGINS',
280 'STATIC_PATHS',
281 'THEME_STATIC_PATHS',
282 )
283 for PATH_KEY in filter(lambda k: k in settings, path_keys):
284 if isinstance(settings[PATH_KEY], six.string_types):
285 logger.warning("Detected misconfiguration with %s setting "
286 "(must be a list), falling back to the default"
287 % PATH_KEY)
288 settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]
289
290 for old, new, doc in [
291 ('LESS_GENERATOR', 'the Webassets plugin', None),
292 ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',
293 'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),
294 ]:
295 if old in settings:
296 message = 'The {} setting has been removed in favor of {}'.format(
297 old, new)
298 if doc:
299 message += ', see {} for details'.format(doc)
300 logger.warning(message)
301
302 return settings
303
[end of pelican/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pelican/settings.py b/pelican/settings.py
--- a/pelican/settings.py
+++ b/pelican/settings.py
@@ -68,6 +68,8 @@
'PDF_GENERATOR': False,
'PDF_STYLE_PATH': '',
'PDF_STYLE': 'twelvepoint',
+ 'CATEGORIES_URL': 'categories.html',
+ 'CATEGORIES_SAVE_AS': 'categories.html',
'CATEGORY_URL': 'category/{slug}.html',
'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),
'TAG_URL': 'tag/{slug}.html',
| {"golden_diff": "diff --git a/pelican/settings.py b/pelican/settings.py\n--- a/pelican/settings.py\n+++ b/pelican/settings.py\n@@ -68,6 +68,8 @@\n 'PDF_GENERATOR': False,\n 'PDF_STYLE_PATH': '',\n 'PDF_STYLE': 'twelvepoint',\n+ 'CATEGORIES_URL': 'categories.html',\n+ 'CATEGORIES_SAVE_AS': 'categories.html',\n 'CATEGORY_URL': 'category/{slug}.html',\n 'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),\n 'TAG_URL': 'tag/{slug}.html',\n", "issue": "CATEGORIES_URL doesn't honor it's default value\nFirst of all, thanks for this great software. But, when I'm using the variable CATEGORIES_URL in templates (category.html), it doesn't have it's default value (as said on the documentation: CATEGORIES_URL ('categories/')). I need to explicitly set this value in pelican.conf. I'm using Pelican 3.3.0\n\nThanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nimport six\n\nimport copy\nimport inspect\nimport os\nimport locale\nimport logging\n\ntry:\n # SourceFileLoader is the recommended way in 3.3+\n from importlib.machinery import SourceFileLoader\n load_source = lambda name, path: SourceFileLoader(name, path).load_module()\nexcept ImportError:\n # but it does not exist in 3.2-, so fall back to imp\n import imp\n load_source = imp.load_source\n\nfrom os.path import isabs\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'themes', 'notmyidea')\nDEFAULT_CONFIG = {\n 'PATH': os.curdir,\n 'ARTICLE_DIR': '',\n 'ARTICLE_EXCLUDES': ('pages',),\n 'PAGE_DIR': 'pages',\n 'PAGE_EXCLUDES': (),\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output',\n 'READERS': {},\n 'STATIC_PATHS': ['images', ],\n 'THEME_STATIC_DIR': 'theme',\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED_ALL_ATOM': os.path.join('feeds', 'all.atom.xml'),\n 'CATEGORY_FEED_ATOM': os.path.join('feeds', '%s.atom.xml'),\n 'TRANSLATION_FEED_ATOM': os.path.join('feeds', 'all-%s.atom.xml'),\n 'FEED_MAX_ITEMS': '',\n 'SITEURL': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'DISPLAY_CATEGORIES_ON_MENU': True,\n 'OUTPUT_SOURCES': False,\n 'OUTPUT_SOURCES_EXTENSION': '.text',\n 'USE_FOLDER_AS_CATEGORY': True,\n 'DEFAULT_CATEGORY': 'misc',\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'NEWEST_FIRST_ARCHIVES': True,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'OUTPUT_RETENTION': (),\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': os.path.join('pages', '{slug}.html'),\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': os.path.join('pages', '{slug}-{lang}.html'),\n 'STATIC_URL': '{path}',\n 'STATIC_SAVE_AS': '{path}',\n 'PDF_GENERATOR': False,\n 'PDF_STYLE_PATH': '',\n 'PDF_STYLE': 'twelvepoint',\n 'CATEGORY_URL': 'category/{slug}.html',\n 'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': os.path.join('tag', '{slug}.html'),\n 'AUTHOR_URL': 'author/{slug}.html',\n 'AUTHOR_SAVE_AS': os.path.join('author', '{slug}.html'),\n 'PAGINATION_PATTERNS': [\n (0, '{name}{number}.html', '{name}{number}.html'),\n ],\n 'YEAR_ARCHIVE_SAVE_AS': False,\n 'MONTH_ARCHIVE_SAVE_AS': False,\n 'DAY_ARCHIVE_SAVE_AS': False,\n 'RELATIVE_URLS': False,\n 'DEFAULT_LANG': 'en',\n 'TAG_CLOUD_STEPS': 4,\n 'TAG_CLOUD_MAX_ITEMS': 100,\n 
'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'authors', 'archives'),\n 'EXTRA_TEMPLATES_PATHS': [],\n 'PAGINATED_DIRECT_TEMPLATES': ('index', ),\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'ASCIIDOC_OPTIONS': [],\n 'MD_EXTENSIONS': ['codehilite(css_class=highlight)', 'extra'],\n 'JINJA_EXTENSIONS': [],\n 'JINJA_FILTERS': {},\n 'LOCALE': [''], # defaults to user locale\n 'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': (),\n 'FILENAME_METADATA': '(?P<date>\\d{4}-\\d{2}-\\d{2}).*',\n 'PATH_METADATA': '',\n 'EXTRA_PATH_METADATA': {},\n 'DEFAULT_STATUS': 'published',\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n 'SUMMARY_MAX_LENGTH': 50,\n 'PLUGIN_PATH': '',\n 'PLUGINS': [],\n 'PYGMENTS_RST_OPTIONS': {},\n 'TEMPLATE_PAGES': {},\n 'IGNORE_FILES': ['.#*'],\n 'SLUG_SUBSTITUTIONS': (),\n 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',\n }\n\nPYGMENTS_RST_OPTIONS = None\n\n\ndef read_settings(path=None, override=None):\n if path:\n local_settings = get_settings_from_file(path)\n # Make the paths relative to the settings file\n for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'PLUGIN_PATH']:\n if p in local_settings and local_settings[p] is not None \\\n and not isabs(local_settings[p]):\n absp = os.path.abspath(os.path.normpath(os.path.join(\n os.path.dirname(path), local_settings[p])))\n if p not in ('THEME', 'PLUGIN_PATH') or os.path.exists(absp):\n local_settings[p] = absp\n else:\n local_settings = copy.deepcopy(DEFAULT_CONFIG)\n\n if override:\n local_settings.update(override)\n\n parsed_settings = configure_settings(local_settings)\n # This is because there doesn't seem to be a way to pass extra\n # parameters to docutils directive handlers, so we have to have a\n # variable here that we'll import from within Pygments.run (see\n # rstdirectives.py) to see what the user defaults were.\n global PYGMENTS_RST_OPTIONS\n PYGMENTS_RST_OPTIONS = parsed_settings.get('PYGMENTS_RST_OPTIONS', None)\n return parsed_settings\n\n\ndef get_settings_from_module(module=None, default_settings=DEFAULT_CONFIG):\n \"\"\"Loads settings from a module, returns a dictionary.\"\"\"\n\n context = copy.deepcopy(default_settings)\n if module is not None:\n context.update(\n (k, v) for k, v in inspect.getmembers(module) if k.isupper())\n return context\n\n\ndef get_settings_from_file(path, default_settings=DEFAULT_CONFIG):\n \"\"\"Loads settings from a file path, returning a dict.\"\"\"\n\n name, ext = os.path.splitext(os.path.basename(path))\n module = load_source(name, path)\n return get_settings_from_module(module, default_settings=default_settings)\n\n\ndef configure_settings(settings):\n \"\"\"Provide optimizations, error checking and warnings for the given\n settings.\n\n \"\"\"\n if not 'PATH' in settings or not os.path.isdir(settings['PATH']):\n raise Exception('You need to specify a path containing the content'\n ' (see pelican --help for more information)')\n\n # lookup the theme in \"pelican/themes\" if the given one doesn't exist\n if not os.path.isdir(settings['THEME']):\n theme_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'themes',\n settings['THEME'])\n if os.path.exists(theme_path):\n settings['THEME'] = theme_path\n else:\n raise Exception(\"Could not find the theme %s\"\n % settings['THEME'])\n\n # standardize strings to lowercase strings\n for key in [\n 'DEFAULT_LANG',\n ]:\n if key in settings:\n settings[key] = settings[key].lower()\n\n # standardize strings to lists\n for key in 
[\n 'LOCALE',\n ]:\n if key in settings and isinstance(settings[key], six.string_types):\n settings[key] = [settings[key]]\n\n # check settings that must be a particular type\n for key, types in [\n ('OUTPUT_SOURCES_EXTENSION', six.string_types),\n ('FILENAME_METADATA', six.string_types),\n ]:\n if key in settings and not isinstance(settings[key], types):\n value = settings.pop(key)\n logger.warn(\n 'Detected misconfigured {} ({}), '\n 'falling back to the default ({})'.format(\n key, value, DEFAULT_CONFIG[key]))\n\n # try to set the different locales, fallback on the default.\n locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, str(locale_))\n break # break if it is successful\n except locale.Error:\n pass\n else:\n logger.warning(\"LOCALE option doesn't contain a correct value\")\n\n if ('SITEURL' in settings):\n # If SITEURL has a trailing slash, remove it and provide a warning\n siteurl = settings['SITEURL']\n if (siteurl.endswith('/')):\n settings['SITEURL'] = siteurl[:-1]\n logger.warning(\"Removed extraneous trailing slash from SITEURL.\")\n # If SITEURL is defined but FEED_DOMAIN isn't,\n # set FEED_DOMAIN to SITEURL\n if not 'FEED_DOMAIN' in settings:\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n feed_keys = [\n 'FEED_ATOM', 'FEED_RSS',\n 'FEED_ALL_ATOM', 'FEED_ALL_RSS',\n 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',\n 'TAG_FEED_ATOM', 'TAG_FEED_RSS',\n 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',\n ]\n\n if any(settings.get(k) for k in feed_keys):\n if not settings.get('SITEURL'):\n logger.warning('Feeds generated without SITEURL set properly may'\n ' not be valid')\n\n if not 'TIMEZONE' in settings:\n logger.warning(\n 'No timezone information specified in the settings. Assuming'\n ' your timezone is UTC for feed generation. Check '\n 'http://docs.getpelican.com/en/latest/settings.html#timezone '\n 'for more information')\n\n # fix up pagination rules\n from pelican.paginator import PaginationRule\n pagination_rules = [\n PaginationRule(*r) for r in settings.get(\n 'PAGINATION_PATTERNS',\n DEFAULT_CONFIG['PAGINATION_PATTERNS'],\n )\n ]\n settings['PAGINATION_PATTERNS'] = sorted(\n pagination_rules,\n key=lambda r: r[0],\n )\n\n # Save people from accidentally setting a string rather than a list\n path_keys = (\n 'ARTICLE_EXCLUDES',\n 'DEFAULT_METADATA',\n 'DIRECT_TEMPLATES',\n 'EXTRA_TEMPLATES_PATHS',\n 'FILES_TO_COPY',\n 'IGNORE_FILES',\n 'JINJA_EXTENSIONS',\n 'PAGINATED_DIRECT_TEMPLATES',\n 'PLUGINS',\n 'STATIC_PATHS',\n 'THEME_STATIC_PATHS',\n )\n for PATH_KEY in filter(lambda k: k in settings, path_keys):\n if isinstance(settings[PATH_KEY], six.string_types):\n logger.warning(\"Detected misconfiguration with %s setting \"\n \"(must be a list), falling back to the default\"\n % PATH_KEY)\n settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]\n\n for old, new, doc in [\n ('LESS_GENERATOR', 'the Webassets plugin', None),\n ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',\n 'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),\n ]:\n if old in settings:\n message = 'The {} setting has been removed in favor of {}'.format(\n old, new)\n if doc:\n message += ', see {} for details'.format(doc)\n logger.warning(message)\n\n return settings\n", "path": "pelican/settings.py"}]} | 4,072 | 133 |
gh_patches_debug_3973 | rasdani/github-patches | git_diff | encode__uvicorn-513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--reload not working on Windows but --loop asyncio is
Duplicate of https://github.com/encode/uvicorn/issues/477, but opened as a new topic to track the issue, per request. Especially useful since `--loop asyncio` does seem to work just fine.
@tomchristie suggests defaulting to asyncio on all platforms. I can make a PR for that. Do you still want to keep the optional uvloop dependency? (It's incompatible with Windows in any case, so perhaps it should at least be removed from the requirements file.)
</issue>
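A minimal sketch of what defaulting to asyncio on all platforms could look like for the loop setup below: the Windows-specific IOCP branch goes away and the asyncio setup becomes the only fallback when uvloop is missing. This is an illustration of the proposal, not necessarily the final patch:

    def auto_loop_setup():
        try:
            import uvloop  # optional accelerator, unavailable on Windows
        except ImportError:  # pragma: no cover
            from uvicorn.loops.asyncio import asyncio_setup as loop_setup
            loop_setup()
        else:
            from uvicorn.loops.uvloop import uvloop_setup
            uvloop_setup()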
<code>
[start of uvicorn/loops/auto.py]
1 import sys
2
3
4 def auto_loop_setup():
5 try:
6 import uvloop
7 except ImportError as exc: # pragma: no cover
8 if sys.platform == "win32":
9 from uvicorn.loops.iocp import iocp_setup as loop_setup
10 else:
11 from uvicorn.loops.asyncio import asyncio_setup as loop_setup
12
13 loop_setup()
14 else:
15 from uvicorn.loops.uvloop import uvloop_setup
16
17 uvloop_setup()
18
[end of uvicorn/loops/auto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/loops/auto.py b/uvicorn/loops/auto.py
--- a/uvicorn/loops/auto.py
+++ b/uvicorn/loops/auto.py
@@ -5,10 +5,7 @@
try:
import uvloop
except ImportError as exc: # pragma: no cover
- if sys.platform == "win32":
- from uvicorn.loops.iocp import iocp_setup as loop_setup
- else:
- from uvicorn.loops.asyncio import asyncio_setup as loop_setup
+ from uvicorn.loops.asyncio import asyncio_setup as loop_setup
loop_setup()
else:
| {"golden_diff": "diff --git a/uvicorn/loops/auto.py b/uvicorn/loops/auto.py\n--- a/uvicorn/loops/auto.py\n+++ b/uvicorn/loops/auto.py\n@@ -5,10 +5,7 @@\n try:\n import uvloop\n except ImportError as exc: # pragma: no cover\n- if sys.platform == \"win32\":\n- from uvicorn.loops.iocp import iocp_setup as loop_setup\n- else:\n- from uvicorn.loops.asyncio import asyncio_setup as loop_setup\n+ from uvicorn.loops.asyncio import asyncio_setup as loop_setup\n \n loop_setup()\n else:\n", "issue": "--reload not working on Windows but --loop asyncio is\nDuplicate of https://github.com/encode/uvicorn/issues/477 but per request a new topic to track the issue. Especially useful since `--loop asyncio` does seem to work just fine.\r\n\r\n@tomchristie suggests to default to asyncio on all platforms. I can make a PR for that. Do you still want to keep the optional uvloop dependency? (It's incompatible with Windows in any case, so perhaps it should be removed from the requirements file at least.)\n", "before_files": [{"content": "import sys\n\n\ndef auto_loop_setup():\n try:\n import uvloop\n except ImportError as exc: # pragma: no cover\n if sys.platform == \"win32\":\n from uvicorn.loops.iocp import iocp_setup as loop_setup\n else:\n from uvicorn.loops.asyncio import asyncio_setup as loop_setup\n\n loop_setup()\n else:\n from uvicorn.loops.uvloop import uvloop_setup\n\n uvloop_setup()\n", "path": "uvicorn/loops/auto.py"}]} | 779 | 145 |
gh_patches_debug_195 | rasdani/github-patches | git_diff | conda__conda-3524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Progress bar broken

```
C:\Users\Korijn\dev\myproject>conda info
Current conda install:
platform : win-64
conda version : 4.2.7
conda is private : False
conda-env version : 4.2.7
conda-build version : 2.0.1
python version : 3.5.1.final.0
requests version : 2.9.1
root environment : C:\Users\Korijn\Miniconda3 (writable)
default environment : C:\Users\Korijn\Miniconda3
envs directories : C:\Users\Korijn\Miniconda3\envs
package cache : C:\Users\Korijn\Miniconda3\pkgs
channel URLs : https://repo.continuum.io/pkgs/free/win-64/
https://repo.continuum.io/pkgs/free/noarch/
https://repo.continuum.io/pkgs/pro/win-64/
https://repo.continuum.io/pkgs/pro/noarch/
https://repo.continuum.io/pkgs/msys2/win-64/
https://repo.continuum.io/pkgs/msys2/noarch/
config file : C:\Users\Korijn\.condarc
offline mode : False
```
</issue>
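For context, a minimal sketch of the kind of Python 3 truthiness pitfall that can break console output like this progress bar (tying it to the progress bar here is an assumption): a sentinel class that only defines __nonzero__ stays truthy on Python 3, because Python 3 consults __bool__ instead, so both hooks need to return False:

    class _Null(object):
        def __nonzero__(self):  # truthiness hook used by Python 2
            return False

        def __bool__(self):  # truthiness hook used by Python 3
            return False

    NULL = _Null()
    assert not NULL  # falsy on both Python 2 and Python 3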
<code>
[start of conda/base/constants.py]
1 # -*- coding: utf-8 -*-
2 """
3 This file should hold almost all string literals and magic numbers used throughout the code base.
4 The exception is if a literal is specifically meant to be private to and isolated within a module.
5 """
6 from __future__ import absolute_import, division, print_function
7
8 import os
9 import sys
10 from logging import getLogger
11 from platform import machine
12
13 from enum import Enum
14
15 from conda._vendor.auxlib.collection import frozendict
16
17 log = getLogger(__name__)
18
19
20 class Arch(Enum):
21 x86 = 'x86'
22 x86_64 = 'x86_64'
23 armv6l = 'armv6l'
24 armv7l = 'armv7l'
25 ppc64le = 'ppc64le'
26
27 @classmethod
28 def from_sys(cls):
29 return cls[machine()]
30
31
32 class Platform(Enum):
33 linux = 'linux'
34 win = 'win32'
35 openbsd = 'openbsd5'
36 osx = 'darwin'
37
38 @classmethod
39 def from_sys(cls):
40 p = sys.platform
41 if p.startswith('linux'):
42 # Changed in version 2.7.3: Since lots of code check for sys.platform == 'linux2',
43 # and there is no essential change between Linux 2.x and 3.x, sys.platform is always
44 # set to 'linux2', even on Linux 3.x. In Python 3.3 and later, the value will always
45 # be set to 'linux'
46 p = 'linux'
47 return cls(p)
48
49 machine_bits = 8 * tuple.__itemsize__
50
51 # UID = os.getuid()
52 PWD = os.getcwd()
53 CONDA = 'CONDA'
54 CONDA_ = 'CONDA_'
55 conda = 'conda'
56
57 SEARCH_PATH = (
58 '/etc/conda/condarc',
59 '/etc/conda/condarc.d/',
60 '/var/lib/conda/condarc',
61 '/var/lib/conda/condarc.d/',
62 '$CONDA_ROOT/condarc',
63 '$CONDA_ROOT/.condarc',
64 '$CONDA_ROOT/condarc.d/',
65 '~/.conda/condarc',
66 '~/.conda/condarc.d/',
67 '~/.condarc',
68 '$CONDA_PREFIX/.condarc',
69 '$CONDA_PREFIX/condarc.d/',
70 '$CONDARC',
71 )
72
73 DEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org/'
74
75 PLATFORM_DIRECTORIES = ("linux-64", "linux-32",
76 "win-64", "win-32",
77 "osx-64", "noarch")
78
79 RECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')
80
81
82 if Platform.from_sys() is Platform.win:
83 DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',
84 'https://repo.continuum.io/pkgs/pro',
85 'https://repo.continuum.io/pkgs/msys2',
86 )
87 else:
88 DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',
89 'https://repo.continuum.io/pkgs/pro',
90 )
91
92 ROOT_ENV_NAME = 'root'
93
94 EMPTY_LIST = ()
95 EMPTY_MAP = frozendict()
96
97
98 class _Null(object):
99 def __nonzero__(self):
100 return False
101
102 NULL = _Null()
103
104 UTF8 = 'UTF-8'
105
[end of conda/base/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/base/constants.py b/conda/base/constants.py
--- a/conda/base/constants.py
+++ b/conda/base/constants.py
@@ -99,6 +99,9 @@
def __nonzero__(self):
return False
+ def __bool__(self):
+ return False
+
NULL = _Null()
UTF8 = 'UTF-8'
| {"golden_diff": "diff --git a/conda/base/constants.py b/conda/base/constants.py\n--- a/conda/base/constants.py\n+++ b/conda/base/constants.py\n@@ -99,6 +99,9 @@\n def __nonzero__(self):\n return False\n \n+ def __bool__(self):\n+ return False\n+\n NULL = _Null()\n \n UTF8 = 'UTF-8'\n", "issue": "Progress bar broken\n\n\n```\nC:\\Users\\Korijn\\dev\\myproject>conda info\nCurrent conda install:\n\n platform : win-64\n conda version : 4.2.7\n conda is private : False\n conda-env version : 4.2.7\n conda-build version : 2.0.1\n python version : 3.5.1.final.0\n requests version : 2.9.1\n root environment : C:\\Users\\Korijn\\Miniconda3 (writable)\n default environment : C:\\Users\\Korijn\\Miniconda3\n envs directories : C:\\Users\\Korijn\\Miniconda3\\envs\n package cache : C:\\Users\\Korijn\\Miniconda3\\pkgs\n channel URLs : https://repo.continuum.io/pkgs/free/win-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/win-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n https://repo.continuum.io/pkgs/msys2/win-64/\n https://repo.continuum.io/pkgs/msys2/noarch/\n config file : C:\\Users\\Korijn\\.condarc\n offline mode : False\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis file should hold almost all string literals and magic numbers used throughout the code base.\nThe exception is if a literal is specifically meant to be private to and isolated within a module.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom logging import getLogger\nfrom platform import machine\n\nfrom enum import Enum\n\nfrom conda._vendor.auxlib.collection import frozendict\n\nlog = getLogger(__name__)\n\n\nclass Arch(Enum):\n x86 = 'x86'\n x86_64 = 'x86_64'\n armv6l = 'armv6l'\n armv7l = 'armv7l'\n ppc64le = 'ppc64le'\n\n @classmethod\n def from_sys(cls):\n return cls[machine()]\n\n\nclass Platform(Enum):\n linux = 'linux'\n win = 'win32'\n openbsd = 'openbsd5'\n osx = 'darwin'\n\n @classmethod\n def from_sys(cls):\n p = sys.platform\n if p.startswith('linux'):\n # Changed in version 2.7.3: Since lots of code check for sys.platform == 'linux2',\n # and there is no essential change between Linux 2.x and 3.x, sys.platform is always\n # set to 'linux2', even on Linux 3.x. 
In Python 3.3 and later, the value will always\n # be set to 'linux'\n p = 'linux'\n return cls(p)\n\nmachine_bits = 8 * tuple.__itemsize__\n\n# UID = os.getuid()\nPWD = os.getcwd()\nCONDA = 'CONDA'\nCONDA_ = 'CONDA_'\nconda = 'conda'\n\nSEARCH_PATH = (\n '/etc/conda/condarc',\n '/etc/conda/condarc.d/',\n '/var/lib/conda/condarc',\n '/var/lib/conda/condarc.d/',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc.d/',\n '~/.conda/condarc',\n '~/.conda/condarc.d/',\n '~/.condarc',\n '$CONDA_PREFIX/.condarc',\n '$CONDA_PREFIX/condarc.d/',\n '$CONDARC',\n)\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org/'\n\nPLATFORM_DIRECTORIES = (\"linux-64\", \"linux-32\",\n \"win-64\", \"win-32\",\n \"osx-64\", \"noarch\")\n\nRECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')\n\n\nif Platform.from_sys() is Platform.win:\n DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro',\n 'https://repo.continuum.io/pkgs/msys2',\n )\nelse:\n DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro',\n )\n\nROOT_ENV_NAME = 'root'\n\nEMPTY_LIST = ()\nEMPTY_MAP = frozendict()\n\n\nclass _Null(object):\n def __nonzero__(self):\n return False\n\nNULL = _Null()\n\nUTF8 = 'UTF-8'\n", "path": "conda/base/constants.py"}]} | 1,863 | 84 |
gh_patches_debug_14579 | rasdani/github-patches | git_diff | boto__boto-1543 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support additional regions in CloudSearch
It appears that boto currently only supports two regions. It would be nice to support the more recently added ones.
Current list of CloudSearch regions is here: http://aws.amazon.com/cloudsearch/faqs/#Which_AWS_regions_is_CloudSearch_available_in?
I think it just needs to be added to `boto/cloudsearch/__init__.py`:
return [RegionInfo(name='us-east-1',
endpoint='cloudsearch.us-east-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
And yes, you can use another region by changing the default field in .boto
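For illustration, an entry for one of the newer regions would presumably follow the same pattern as the two above (a sketch only — the exact region names and endpoints should be confirmed against the AWS FAQ linked earlier):
```python
import boto.cloudsearch.layer1
from boto.ec2.regioninfo import RegionInfo

# Hypothetical additional entry; the endpoint is assumed to follow the
# cloudsearch.<region>.amazonaws.com naming scheme used by the existing entries.
us_west_2 = RegionInfo(name='us-west-2',
                       endpoint='cloudsearch.us-west-2.amazonaws.com',
                       connection_cls=boto.cloudsearch.layer1.Layer1)
```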
</issue>
<code>
[start of boto/cloudsearch/__init__.py]
1 # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
2 # Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
3 # All Rights Reserved
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining a
6 # copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish, dis-
9 # tribute, sublicense, and/or sell copies of the Software, and to permit
10 # persons to whom the Software is furnished to do so, subject to the fol-
11 # lowing conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
18 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 # IN THE SOFTWARE.
23 #
24 from boto.ec2.regioninfo import RegionInfo
25
26
27 def regions():
28 """
29 Get all available regions for the Amazon CloudSearch service.
30
31 :rtype: list
32 :return: A list of :class:`boto.regioninfo.RegionInfo`
33 """
34 import boto.cloudsearch.layer1
35 return [RegionInfo(name='us-east-1',
36 endpoint='cloudsearch.us-east-1.amazonaws.com',
37 connection_cls=boto.cloudsearch.layer1.Layer1),
38 RegionInfo(name='eu-west-1',
39 endpoint='cloudsearch.eu-west-1.amazonaws.com',
40 connection_cls=boto.cloudsearch.layer1.Layer1),
41 ]
42
43
44 def connect_to_region(region_name, **kw_params):
45 for region in regions():
46 if region.name == region_name:
47 return region.connect(**kw_params)
48 return None
49
[end of boto/cloudsearch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py
--- a/boto/cloudsearch/__init__.py
+++ b/boto/cloudsearch/__init__.py
@@ -38,6 +38,16 @@
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-1',
+ endpoint='cloudsearch.us-west-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-2',
+ endpoint='cloudsearch.us-west-2.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='cloudsearch.ap-southeast-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+
]
| {"golden_diff": "diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py\n--- a/boto/cloudsearch/__init__.py\n+++ b/boto/cloudsearch/__init__.py\n@@ -38,6 +38,16 @@\n RegionInfo(name='eu-west-1',\n endpoint='cloudsearch.eu-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n+ RegionInfo(name='us-west-1',\n+ endpoint='cloudsearch.us-west-1.amazonaws.com',\n+ connection_cls=boto.cloudsearch.layer1.Layer1),\n+ RegionInfo(name='us-west-2',\n+ endpoint='cloudsearch.us-west-2.amazonaws.com',\n+ connection_cls=boto.cloudsearch.layer1.Layer1),\n+ RegionInfo(name='ap-southeast-1',\n+ endpoint='cloudsearch.ap-southeast-1.amazonaws.com',\n+ connection_cls=boto.cloudsearch.layer1.Layer1),\n+\n ]\n", "issue": "Support addtional regions in CloudSearch \nIt appears that boto currently only supports two regions. It would be nice to support the more recently added ones. \n\nCurrent list of CloudSearch regions is here: http://aws.amazon.com/cloudsearch/faqs/#Which_AWS_regions_is_CloudSearch_available_in?\n\nI think it just needs to be added to boto/cloudsearch/**init**.py:\n\nreturn [RegionInfo(name='us-east-1',\n endpoint='cloudsearch.us-east-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='eu-west-1',\n endpoint='cloudsearch.eu-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n\nAnd yes, you can use another region by changing the default field in .boto \n\n", "before_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.\n# All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\nfrom boto.ec2.regioninfo import RegionInfo\n\n\ndef regions():\n \"\"\"\n Get all available regions for the Amazon CloudSearch service.\n\n :rtype: list\n :return: A list of :class:`boto.regioninfo.RegionInfo`\n \"\"\"\n import boto.cloudsearch.layer1\n return [RegionInfo(name='us-east-1',\n endpoint='cloudsearch.us-east-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='eu-west-1',\n endpoint='cloudsearch.eu-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n ]\n\n\ndef connect_to_region(region_name, **kw_params):\n for region in regions():\n if region.name == region_name:\n return region.connect(**kw_params)\n return None\n", "path": "boto/cloudsearch/__init__.py"}]} | 1,241 | 209 |
gh_patches_debug_24204 | rasdani/github-patches | git_diff | ansible-collections__community.general-1110 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
capabilities: libcap-2.4.3 changed output of getcap
##### SUMMARY
getcap output changed in version 2.43 vs. 2.26 of libcap, breaking community.general.capabilities module
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
capabilities
##### ANSIBLE VERSION
```
ansible 2.10.1
config file = /home/thomas/git/ansible/ansible.cfg
configured module search path = ['/home/thomas/git/ansible/library']
ansible python module location = /home/thomas/.python-venvs/_home_thomas_git_ansible/lib/python3.6/site-packages/ansible
executable location = /home/thomas/.python-venvs/_home_thomas_git_ansible/bin/ansible
python version = 3.6.9 (default, Jul 17 2020, 12:50:27) [GCC 8.4.0]
```
##### CONFIGURATION
```
CACHE_PLUGIN(/home/thomas/git/ansible/ansible.cfg) = jsonfile
CACHE_PLUGIN_CONNECTION(/home/thomas/git/ansible/ansible.cfg) = factscache
CACHE_PLUGIN_TIMEOUT(/home/thomas/git/ansible/ansible.cfg) = 86400
DEFAULT_BECOME(/home/thomas/git/ansible/ansible.cfg) = True
DEFAULT_BECOME_ASK_PASS(/home/thomas/git/ansible/ansible.cfg) = False
DEFAULT_BECOME_METHOD(/home/thomas/git/ansible/ansible.cfg) = sudo
DEFAULT_BECOME_USER(/home/thomas/git/ansible/ansible.cfg) = root
DEFAULT_CALLBACK_WHITELIST(/home/thomas/git/ansible/ansible.cfg) = ['profile_roles', 'profile_tasks', 'timer']
DEFAULT_FORKS(/home/thomas/git/ansible/ansible.cfg) = 12
DEFAULT_GATHERING(/home/thomas/git/ansible/ansible.cfg) = smart
DEFAULT_HOST_LIST(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/inventory/production.py', '/home/thomas/git/ansible/inventory/nameserver_hcloud.yaml']
DEFAULT_MANAGED_STR(/home/thomas/git/ansible/ansible.cfg) = This file is managed by Ansible. Do not edit manually!
DEFAULT_MODULE_PATH(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/library']
INTERPRETER_PYTHON(/home/thomas/git/ansible/ansible.cfg) = auto_silent
INVENTORY_ENABLED(/home/thomas/git/ansible/ansible.cfg) = ['script', 'hcloud']
MAX_FILE_SIZE_FOR_DIFF(/home/thomas/git/ansible/ansible.cfg) = 1048576
```
##### OS / ENVIRONMENT
Host running ansible is Ubuntu 18.04, target system is Gentoo with sys-libs/libcap-2.4.3
##### STEPS TO REPRODUCE
```yaml
- name: ensure capabilities for ping are set
community.general.capabilities:
path: /bin/ping
capability: cap_net_raw+ep
state: present
```
##### EXPECTED RESULTS
Ansible sets cap_net_raw+ep if it is not already set
##### ACTUAL RESULTS
Ansible fails getting current capabilities.
```
fatal: [server20.tralios.de]: FAILED! => {
"changed": false,
"invocation": {
"module_args": {
"capability": "cap_net_raw+ep",
"path": "/bin/ping",
"state": "present"
}
},
"msg": "Unable to get capabilities of /bin/ping",
"stderr": "",
"stderr_lines": [],
"stdout": "/bin/ping cap_net_raw=ep",
"stdout_lines": [
"/bin/ping cap_net_raw=ep"
]
}
```
##### Additional information
Version 2.26:
```
➜ touch capabilities_test
➜ sudo setcap cap_net_raw+ep capabilities_test
➜ getcap capabilities_test
capabilities_test = cap_net_raw+ep
```
Version 2.43:
```
➜ getcap capabilities_test
capabilities_test cap_net_raw=ep
```
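The practical difference is that newer libcap drops the ` = ` separator between the path and the capability list, so a parser that splits on `' ='` no longer matches. A minimal version-tolerant parsing sketch (an illustration only, not the module's actual code):
```python
# Sketch: accept both getcap output styles shown above (one file per line).
def parse_getcap_line(line):
    line = line.strip()
    if ' =' in line:
        # older style (2.26 above): 'capabilities_test = cap_net_raw+ep'
        path, caps = line.split(' =', 1)
    else:
        # newer style (2.43 above): 'capabilities_test cap_net_raw=ep'
        path, _, caps = line.partition(' ')
    return path.strip(), caps.strip()

print(parse_getcap_line('capabilities_test = cap_net_raw+ep'))   # ('capabilities_test', 'cap_net_raw+ep')
print(parse_getcap_line('capabilities_test cap_net_raw=ep'))     # ('capabilities_test', 'cap_net_raw=ep')
```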
</issue>
<code>
[start of plugins/modules/system/capabilities.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2014, Nate Coraor <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10 DOCUMENTATION = r'''
11 ---
12 module: capabilities
13 short_description: Manage Linux capabilities
14 description:
15 - This module manipulates files privileges using the Linux capabilities(7) system.
16 options:
17 path:
18 description:
19 - Specifies the path to the file to be managed.
20 type: str
21 required: yes
22 aliases: [ key ]
23 capability:
24 description:
25 - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
26 type: str
27 required: yes
28 aliases: [ cap ]
29 state:
30 description:
31 - Whether the entry should be present or absent in the file's capabilities.
32 type: str
33 choices: [ absent, present ]
34 default: present
35 notes:
36 - The capabilities system will automatically transform operators and flags into the effective set,
37 so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
38 - This module does not attempt to determine the final operator and flags to compare,
39 so you will want to ensure that your capabilities argument matches the final capabilities.
40 author:
41 - Nate Coraor (@natefoo)
42 '''
43
44 EXAMPLES = r'''
45 - name: Set cap_sys_chroot+ep on /foo
46 community.general.capabilities:
47 path: /foo
48 capability: cap_sys_chroot+ep
49 state: present
50
51 - name: Remove cap_net_bind_service from /bar
52 community.general.capabilities:
53 path: /bar
54 capability: cap_net_bind_service
55 state: absent
56 '''
57
58 from ansible.module_utils.basic import AnsibleModule
59
60 OPS = ('=', '-', '+')
61
62
63 class CapabilitiesModule(object):
64 platform = 'Linux'
65 distribution = None
66
67 def __init__(self, module):
68 self.module = module
69 self.path = module.params['path'].strip()
70 self.capability = module.params['capability'].strip().lower()
71 self.state = module.params['state']
72 self.getcap_cmd = module.get_bin_path('getcap', required=True)
73 self.setcap_cmd = module.get_bin_path('setcap', required=True)
74 self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
75
76 self.run()
77
78 def run(self):
79
80 current = self.getcap(self.path)
81 caps = [cap[0] for cap in current]
82
83 if self.state == 'present' and self.capability_tup not in current:
84 # need to add capability
85 if self.module.check_mode:
86 self.module.exit_json(changed=True, msg='capabilities changed')
87 else:
88 # remove from current cap list if it's already set (but op/flags differ)
89 current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
90 # add new cap with correct op/flags
91 current.append(self.capability_tup)
92 self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
93 elif self.state == 'absent' and self.capability_tup[0] in caps:
94 # need to remove capability
95 if self.module.check_mode:
96 self.module.exit_json(changed=True, msg='capabilities changed')
97 else:
98 # remove from current cap list and then set current list
99 current = filter(lambda x: x[0] != self.capability_tup[0], current)
100 self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
101 self.module.exit_json(changed=False, state=self.state)
102
103 def getcap(self, path):
104 rval = []
105 cmd = "%s -v %s" % (self.getcap_cmd, path)
106 rc, stdout, stderr = self.module.run_command(cmd)
107 # If file xattrs are set but no caps are set the output will be:
108 # '/foo ='
109 # If file xattrs are unset the output will be:
110 # '/foo'
111 # If the file does not exist the output will be (with rc == 0...):
112 # '/foo (No such file or directory)'
113 if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
114 self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
115 if stdout.strip() != path:
116 caps = stdout.split(' =')[1].strip().split()
117 for cap in caps:
118 cap = cap.lower()
119 # getcap condenses capabilities with the same op/flags into a
120 # comma-separated list, so we have to parse that
121 if ',' in cap:
122 cap_group = cap.split(',')
123 cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
124 for subcap in cap_group:
125 rval.append((subcap, op, flags))
126 else:
127 rval.append(self._parse_cap(cap))
128 return rval
129
130 def setcap(self, path, caps):
131 caps = ' '.join([''.join(cap) for cap in caps])
132 cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
133 rc, stdout, stderr = self.module.run_command(cmd)
134 if rc != 0:
135 self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
136 else:
137 return stdout
138
139 def _parse_cap(self, cap, op_required=True):
140 opind = -1
141 try:
142 i = 0
143 while opind == -1:
144 opind = cap.find(OPS[i])
145 i += 1
146 except Exception:
147 if op_required:
148 self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
149 else:
150 return (cap, None, None)
151 op = cap[opind]
152 cap, flags = cap.split(op)
153 return (cap, op, flags)
154
155
156 # ==============================================================
157 # main
158
159 def main():
160 # defining module
161 module = AnsibleModule(
162 argument_spec=dict(
163 path=dict(type='str', required=True, aliases=['key']),
164 capability=dict(type='str', required=True, aliases=['cap']),
165 state=dict(type='str', default='present', choices=['absent', 'present']),
166 ),
167 supports_check_mode=True,
168 )
169
170 CapabilitiesModule(module)
171
172
173 if __name__ == '__main__':
174 main()
175
[end of plugins/modules/system/capabilities.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/system/capabilities.py b/plugins/modules/system/capabilities.py
--- a/plugins/modules/system/capabilities.py
+++ b/plugins/modules/system/capabilities.py
@@ -108,12 +108,18 @@
# '/foo ='
# If file xattrs are unset the output will be:
# '/foo'
- # If the file does not exist the output will be (with rc == 0...):
+ # If the file does not exist, the stderr will be (with rc == 0...):
# '/foo (No such file or directory)'
- if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
+ if rc != 0 or stderr != "":
self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
if stdout.strip() != path:
- caps = stdout.split(' =')[1].strip().split()
+ if ' =' in stdout:
+ # process output of an older version of libcap
+ caps = stdout.split(' =')[1].strip().split()
+ else:
+ # otherwise, we have a newer version here
+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+ caps = stdout.split()[1].strip().split()
for cap in caps:
cap = cap.lower()
# getcap condenses capabilities with the same op/flags into a
| {"golden_diff": "diff --git a/plugins/modules/system/capabilities.py b/plugins/modules/system/capabilities.py\n--- a/plugins/modules/system/capabilities.py\n+++ b/plugins/modules/system/capabilities.py\n@@ -108,12 +108,18 @@\n # '/foo ='\n # If file xattrs are unset the output will be:\n # '/foo'\n- # If the file does not exist the output will be (with rc == 0...):\n+ # If the file does not exist, the stderr will be (with rc == 0...):\n # '/foo (No such file or directory)'\n- if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):\n+ if rc != 0 or stderr != \"\":\n self.module.fail_json(msg=\"Unable to get capabilities of %s\" % path, stdout=stdout.strip(), stderr=stderr)\n if stdout.strip() != path:\n- caps = stdout.split(' =')[1].strip().split()\n+ if ' =' in stdout:\n+ # process output of an older version of libcap\n+ caps = stdout.split(' =')[1].strip().split()\n+ else:\n+ # otherwise, we have a newer version here\n+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git\n+ caps = stdout.split()[1].strip().split()\n for cap in caps:\n cap = cap.lower()\n # getcap condenses capabilities with the same op/flags into a\n", "issue": "capabilities: libcap-2.4.3 changed output of getcap\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\ngetcap output changed in version 2.43 vs. 2.26 of libcap, breaking community.general.capabilities module\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\ncapabilities\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes -->\r\n```\r\nansible 2.10.1\r\n config file = /home/thomas/git/ansible/ansible.cfg\r\n configured module search path = ['/home/thomas/git/ansible/library']\r\n ansible python module location = /home/thomas/.python-venvs/_home_thomas_git_ansible/lib/python3.6/site-packages/ansible\r\n executable location = /home/thomas/.python-venvs/_home_thomas_git_ansible/bin/ansible\r\n python version = 3.6.9 (default, Jul 17 2020, 12:50:27) [GCC 8.4.0]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- Paste verbatim output from \"ansible-config dump --only-changed\" between quotes -->\r\n```\r\nCACHE_PLUGIN(/home/thomas/git/ansible/ansible.cfg) = jsonfile\r\nCACHE_PLUGIN_CONNECTION(/home/thomas/git/ansible/ansible.cfg) = factscache\r\nCACHE_PLUGIN_TIMEOUT(/home/thomas/git/ansible/ansible.cfg) = 86400\r\nDEFAULT_BECOME(/home/thomas/git/ansible/ansible.cfg) = True\r\nDEFAULT_BECOME_ASK_PASS(/home/thomas/git/ansible/ansible.cfg) = False\r\nDEFAULT_BECOME_METHOD(/home/thomas/git/ansible/ansible.cfg) = sudo\r\nDEFAULT_BECOME_USER(/home/thomas/git/ansible/ansible.cfg) = root\r\nDEFAULT_CALLBACK_WHITELIST(/home/thomas/git/ansible/ansible.cfg) = ['profile_roles', 'profile_tasks', 'timer']\r\nDEFAULT_FORKS(/home/thomas/git/ansible/ansible.cfg) = 12\r\nDEFAULT_GATHERING(/home/thomas/git/ansible/ansible.cfg) = smart\r\nDEFAULT_HOST_LIST(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/inventory/production.py', '/home/thomas/git/ansible/inventory/nameserver_hcloud.yaml']\r\nDEFAULT_MANAGED_STR(/home/thomas/git/ansible/ansible.cfg) = This file is managed by Ansible. 
Do not edit manually!\r\nDEFAULT_MODULE_PATH(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/library']\r\nINTERPRETER_PYTHON(/home/thomas/git/ansible/ansible.cfg) = auto_silent\r\nINVENTORY_ENABLED(/home/thomas/git/ansible/ansible.cfg) = ['script', 'hcloud']\r\nMAX_FILE_SIZE_FOR_DIFF(/home/thomas/git/ansible/ansible.cfg) = 1048576\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nHost running ansible is Ubuntu 18.04, target system is Gentoo with sys-libs/libcap-2.4.3\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: ensure capabilities for ping are set\r\n community.general.capabilities:\r\n path: /bin/ping\r\n capability: cap_net_raw+ep\r\n state: present\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\nAnsible sets cap_net_raw+ep if it is not already set\r\n\r\n##### ACTUAL RESULTS\r\n<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->\r\nAnsible fails getting current capabilities.\r\n\r\n<!--- Paste verbatim command output between quotes -->\r\n```paste below\r\nfatal: [server20.tralios.de]: FAILED! => {\r\n \"changed\": false,\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"capability\": \"cap_net_raw+ep\",\r\n \"path\": \"/bin/ping\",\r\n \"state\": \"present\"\r\n }\r\n },\r\n \"msg\": \"Unable to get capabilities of /bin/ping\",\r\n \"stderr\": \"\",\r\n \"stderr_lines\": [],\r\n \"stdout\": \"/bin/ping cap_net_raw=ep\",\r\n \"stdout_lines\": [\r\n \"/bin/ping cap_net_raw=ep\"\r\n ]\r\n}\r\n```\r\n\r\n##### Additional information\r\n\r\nVersion 2.26:\r\n```\r\n\u279c touch capabilities_test\r\n\u279c sudo setcap cap_net_raw+ep capabilities_test\r\n\u279c getcap capabilities_test \r\ncapabilities_test = cap_net_raw+ep\r\n```\r\n\r\nVersion 2.43:\r\n```\r\n\u279c getcap capabilities_test \r\ncapabilities_test cap_net_raw=ep\r\n```\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2014, Nate Coraor <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: capabilities\nshort_description: Manage Linux capabilities\ndescription:\n - This module manipulates files privileges using the Linux capabilities(7) system.\noptions:\n path:\n description:\n - Specifies the path to the file to be managed.\n type: str\n required: yes\n aliases: [ key ]\n capability:\n description:\n - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))\n type: str\n required: yes\n aliases: [ cap ]\n state:\n description:\n - Whether the entry should be present or absent in the file's capabilities.\n type: str\n choices: [ absent, present ]\n default: present\nnotes:\n - The capabilities system will automatically transform operators and flags into the effective set,\n so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).\n - This module does not attempt to determine the final operator and flags to compare,\n so you will want to ensure that your capabilities 
argument matches the final capabilities.\nauthor:\n- Nate Coraor (@natefoo)\n'''\n\nEXAMPLES = r'''\n- name: Set cap_sys_chroot+ep on /foo\n community.general.capabilities:\n path: /foo\n capability: cap_sys_chroot+ep\n state: present\n\n- name: Remove cap_net_bind_service from /bar\n community.general.capabilities:\n path: /bar\n capability: cap_net_bind_service\n state: absent\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nOPS = ('=', '-', '+')\n\n\nclass CapabilitiesModule(object):\n platform = 'Linux'\n distribution = None\n\n def __init__(self, module):\n self.module = module\n self.path = module.params['path'].strip()\n self.capability = module.params['capability'].strip().lower()\n self.state = module.params['state']\n self.getcap_cmd = module.get_bin_path('getcap', required=True)\n self.setcap_cmd = module.get_bin_path('setcap', required=True)\n self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')\n\n self.run()\n\n def run(self):\n\n current = self.getcap(self.path)\n caps = [cap[0] for cap in current]\n\n if self.state == 'present' and self.capability_tup not in current:\n # need to add capability\n if self.module.check_mode:\n self.module.exit_json(changed=True, msg='capabilities changed')\n else:\n # remove from current cap list if it's already set (but op/flags differ)\n current = list(filter(lambda x: x[0] != self.capability_tup[0], current))\n # add new cap with correct op/flags\n current.append(self.capability_tup)\n self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))\n elif self.state == 'absent' and self.capability_tup[0] in caps:\n # need to remove capability\n if self.module.check_mode:\n self.module.exit_json(changed=True, msg='capabilities changed')\n else:\n # remove from current cap list and then set current list\n current = filter(lambda x: x[0] != self.capability_tup[0], current)\n self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))\n self.module.exit_json(changed=False, state=self.state)\n\n def getcap(self, path):\n rval = []\n cmd = \"%s -v %s\" % (self.getcap_cmd, path)\n rc, stdout, stderr = self.module.run_command(cmd)\n # If file xattrs are set but no caps are set the output will be:\n # '/foo ='\n # If file xattrs are unset the output will be:\n # '/foo'\n # If the file does not exist the output will be (with rc == 0...):\n # '/foo (No such file or directory)'\n if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):\n self.module.fail_json(msg=\"Unable to get capabilities of %s\" % path, stdout=stdout.strip(), stderr=stderr)\n if stdout.strip() != path:\n caps = stdout.split(' =')[1].strip().split()\n for cap in caps:\n cap = cap.lower()\n # getcap condenses capabilities with the same op/flags into a\n # comma-separated list, so we have to parse that\n if ',' in cap:\n cap_group = cap.split(',')\n cap_group[-1], op, flags = self._parse_cap(cap_group[-1])\n for subcap in cap_group:\n rval.append((subcap, op, flags))\n else:\n rval.append(self._parse_cap(cap))\n return rval\n\n def setcap(self, path, caps):\n caps = ' '.join([''.join(cap) for cap in caps])\n cmd = \"%s '%s' %s\" % (self.setcap_cmd, caps, path)\n rc, stdout, stderr = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg=\"Unable to set capabilities of %s\" % path, stdout=stdout, stderr=stderr)\n else:\n return stdout\n\n def _parse_cap(self, cap, op_required=True):\n 
opind = -1\n try:\n i = 0\n while opind == -1:\n opind = cap.find(OPS[i])\n i += 1\n except Exception:\n if op_required:\n self.module.fail_json(msg=\"Couldn't find operator (one of: %s)\" % str(OPS))\n else:\n return (cap, None, None)\n op = cap[opind]\n cap, flags = cap.split(op)\n return (cap, op, flags)\n\n\n# ==============================================================\n# main\n\ndef main():\n # defining module\n module = AnsibleModule(\n argument_spec=dict(\n path=dict(type='str', required=True, aliases=['key']),\n capability=dict(type='str', required=True, aliases=['cap']),\n state=dict(type='str', default='present', choices=['absent', 'present']),\n ),\n supports_check_mode=True,\n )\n\n CapabilitiesModule(module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/capabilities.py"}]} | 3,555 | 346 |
gh_patches_debug_36577 | rasdani/github-patches | git_diff | geopandas__geopandas-1159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spatial join error after using `explode()`
@jorisvandenbossche There is a specific situation that produces an error with `.sjoin()`.
I receive the following error:
```
File "C:\Hyapp\Anaconda3\lib\site-packages\pandas\core\indexes\base.py", line 1330, in set_names
raise TypeError("Must pass list-like as `names`.")
TypeError: Must pass list-like as `names`.
```
I was able to narrow this down, and the error only happens after using the `df.explode()` function to turn MultiPolygons into multiple Polygons.
Here is an example to produce the error using some data from OSM:
```
# Test to produce an error
import osmnx as ox
import geopandas as gpd
buildings = ox.footprints_from_place("Punavuori, Finland")
buildings = buildings.explode()
landuse = ox.footprints_from_place("Punavuori, Finland", 'landuse')
join = gpd.sjoin(buildings, landuse)
```
The solution for fixing this was to `reset_index()` after using the `.explode()`. I am not sure if this is a bug or expected behavior, but wanted to report my findings anyway. 🙂
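In other words, `explode()` leaves the GeoDataFrame with a two-level MultiIndex (original index, part number), which the single-name index rename inside `sjoin` cannot handle. A sketch of the reported workaround, following the snippet above:
```python
# Sketch of the workaround: flatten the MultiIndex created by explode()
# before joining, so sjoin sees a plain single-level index.
import osmnx as ox
import geopandas as gpd

buildings = ox.footprints_from_place("Punavuori, Finland")
buildings = buildings.explode().reset_index(drop=True)  # plain reset_index() also works
landuse = ox.footprints_from_place("Punavuori, Finland", 'landuse')
join = gpd.sjoin(buildings, landuse)
```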
</issue>
<code>
[start of geopandas/tools/sjoin.py]
1 from warnings import warn
2
3 import numpy as np
4 import pandas as pd
5
6 from shapely import prepared
7
8 from geopandas import GeoDataFrame
9
10
11 def sjoin(
12 left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
13 ):
14 """Spatial join of two GeoDataFrames.
15
16 Parameters
17 ----------
18 left_df, right_df : GeoDataFrames
19 how : string, default 'inner'
20 The type of join:
21
22 * 'left': use keys from left_df; retain only left_df geometry column
23 * 'right': use keys from right_df; retain only right_df geometry column
24 * 'inner': use intersection of keys from both dfs; retain only
25 left_df geometry column
26 op : string, default 'intersection'
27 Binary predicate, one of {'intersects', 'contains', 'within'}.
28 See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
29 lsuffix : string, default 'left'
30 Suffix to apply to overlapping column names (left GeoDataFrame).
31 rsuffix : string, default 'right'
32 Suffix to apply to overlapping column names (right GeoDataFrame).
33
34 """
35 if not isinstance(left_df, GeoDataFrame):
36 raise ValueError(
37 "'left_df' should be GeoDataFrame, got {}".format(type(left_df))
38 )
39
40 if not isinstance(right_df, GeoDataFrame):
41 raise ValueError(
42 "'right_df' should be GeoDataFrame, got {}".format(type(right_df))
43 )
44
45 allowed_hows = ["left", "right", "inner"]
46 if how not in allowed_hows:
47 raise ValueError(
48 '`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
49 )
50
51 allowed_ops = ["contains", "within", "intersects"]
52 if op not in allowed_ops:
53 raise ValueError(
54 '`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
55 )
56
57 if left_df.crs != right_df.crs:
58 warn(
59 (
60 "CRS of frames being joined does not match!"
61 "(%s != %s)" % (left_df.crs, right_df.crs)
62 )
63 )
64
65 index_left = "index_%s" % lsuffix
66 index_right = "index_%s" % rsuffix
67
68 # due to GH 352
69 if any(left_df.columns.isin([index_left, index_right])) or any(
70 right_df.columns.isin([index_left, index_right])
71 ):
72 raise ValueError(
73 "'{0}' and '{1}' cannot be names in the frames being"
74 " joined".format(index_left, index_right)
75 )
76
77 # Attempt to re-use spatial indexes, otherwise generate the spatial index
78 # for the longer dataframe
79 if right_df._sindex_generated or (
80 not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
81 ):
82 tree_idx = right_df.sindex
83 tree_idx_right = True
84 else:
85 tree_idx = left_df.sindex
86 tree_idx_right = False
87
88 # the rtree spatial index only allows limited (numeric) index types, but an
89 # index in geopandas may be any arbitrary dtype. so reset both indices now
90 # and store references to the original indices, to be reaffixed later.
91 # GH 352
92 left_df = left_df.copy(deep=True)
93 left_index_name = left_df.index.name
94 left_df.index = left_df.index.rename(index_left)
95 left_df = left_df.reset_index()
96 right_df = right_df.copy(deep=True)
97 right_index_name = right_df.index.name
98 right_df.index = right_df.index.rename(index_right)
99 right_df = right_df.reset_index()
100
101 if op == "within":
102 # within implemented as the inverse of contains; swap names
103 left_df, right_df = right_df, left_df
104 tree_idx_right = not tree_idx_right
105
106 r_idx = np.empty((0, 0))
107 l_idx = np.empty((0, 0))
108 # get rtree spatial index
109 if tree_idx_right:
110 idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
111 lambda x: list(tree_idx.intersection(x)) if not x == () else []
112 )
113 idxmatch = idxmatch[idxmatch.apply(len) > 0]
114 # indexes of overlapping boundaries
115 if idxmatch.shape[0] > 0:
116 r_idx = np.concatenate(idxmatch.values)
117 l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
118 else:
119 # tree_idx_df == 'left'
120 idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
121 lambda x: list(tree_idx.intersection(x)) if not x == () else []
122 )
123 idxmatch = idxmatch[idxmatch.apply(len) > 0]
124 if idxmatch.shape[0] > 0:
125 # indexes of overlapping boundaries
126 l_idx = np.concatenate(idxmatch.values)
127 r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
128
129 if len(r_idx) > 0 and len(l_idx) > 0:
130 # Vectorize predicate operations
131 def find_intersects(a1, a2):
132 return a1.intersects(a2)
133
134 def find_contains(a1, a2):
135 return a1.contains(a2)
136
137 predicate_d = {
138 "intersects": find_intersects,
139 "contains": find_contains,
140 "within": find_contains,
141 }
142
143 check_predicates = np.vectorize(predicate_d[op])
144
145 result = pd.DataFrame(
146 np.column_stack(
147 [
148 l_idx,
149 r_idx,
150 check_predicates(
151 left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
152 right_df[right_df.geometry.name][r_idx],
153 ),
154 ]
155 )
156 )
157
158 result.columns = ["_key_left", "_key_right", "match_bool"]
159 result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
160 "match_bool", axis=1
161 )
162
163 else:
164 # when output from the join has no overlapping geometries
165 result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
166
167 if op == "within":
168 # within implemented as the inverse of contains; swap names
169 left_df, right_df = right_df, left_df
170 result = result.rename(
171 columns={"_key_left": "_key_right", "_key_right": "_key_left"}
172 )
173
174 if how == "inner":
175 result = result.set_index("_key_left")
176 joined = (
177 left_df.merge(result, left_index=True, right_index=True)
178 .merge(
179 right_df.drop(right_df.geometry.name, axis=1),
180 left_on="_key_right",
181 right_index=True,
182 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
183 )
184 .set_index(index_left)
185 .drop(["_key_right"], axis=1)
186 )
187 joined.index.name = left_index_name
188
189 elif how == "left":
190 result = result.set_index("_key_left")
191 joined = (
192 left_df.merge(result, left_index=True, right_index=True, how="left")
193 .merge(
194 right_df.drop(right_df.geometry.name, axis=1),
195 how="left",
196 left_on="_key_right",
197 right_index=True,
198 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
199 )
200 .set_index(index_left)
201 .drop(["_key_right"], axis=1)
202 )
203 joined.index.name = left_index_name
204
205 else: # how == 'right':
206 joined = (
207 left_df.drop(left_df.geometry.name, axis=1)
208 .merge(
209 result.merge(
210 right_df, left_on="_key_right", right_index=True, how="right"
211 ),
212 left_index=True,
213 right_on="_key_left",
214 how="right",
215 )
216 .set_index(index_right)
217 .drop(["_key_left", "_key_right"], axis=1)
218 )
219 joined.index.name = right_index_name
220
221 return joined
222
[end of geopandas/tools/sjoin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py
--- a/geopandas/tools/sjoin.py
+++ b/geopandas/tools/sjoin.py
@@ -90,12 +90,27 @@
# and store references to the original indices, to be reaffixed later.
# GH 352
left_df = left_df.copy(deep=True)
- left_index_name = left_df.index.name
- left_df.index = left_df.index.rename(index_left)
+ try:
+ left_index_name = left_df.index.name
+ left_df.index = left_df.index.rename(index_left)
+ except TypeError:
+ index_left = [
+ "index_%s" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)
+ ]
+ left_index_name = left_df.index.names
+ left_df.index = left_df.index.rename(index_left)
left_df = left_df.reset_index()
+
right_df = right_df.copy(deep=True)
- right_index_name = right_df.index.name
- right_df.index = right_df.index.rename(index_right)
+ try:
+ right_index_name = right_df.index.name
+ right_df.index = right_df.index.rename(index_right)
+ except TypeError:
+ index_right = [
+ "index_%s" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)
+ ]
+ right_index_name = right_df.index.names
+ right_df.index = right_df.index.rename(index_right)
right_df = right_df.reset_index()
if op == "within":
@@ -184,7 +199,10 @@
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
- joined.index.name = left_index_name
+ if isinstance(index_left, list):
+ joined.index.names = left_index_name
+ else:
+ joined.index.name = left_index_name
elif how == "left":
result = result.set_index("_key_left")
@@ -200,7 +218,10 @@
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
- joined.index.name = left_index_name
+ if isinstance(index_left, list):
+ joined.index.names = left_index_name
+ else:
+ joined.index.name = left_index_name
else: # how == 'right':
joined = (
@@ -216,6 +237,9 @@
.set_index(index_right)
.drop(["_key_left", "_key_right"], axis=1)
)
- joined.index.name = right_index_name
+ if isinstance(index_right, list):
+ joined.index.names = right_index_name
+ else:
+ joined.index.name = right_index_name
return joined
| {"golden_diff": "diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py\n--- a/geopandas/tools/sjoin.py\n+++ b/geopandas/tools/sjoin.py\n@@ -90,12 +90,27 @@\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n- left_index_name = left_df.index.name\n- left_df.index = left_df.index.rename(index_left)\n+ try:\n+ left_index_name = left_df.index.name\n+ left_df.index = left_df.index.rename(index_left)\n+ except TypeError:\n+ index_left = [\n+ \"index_%s\" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)\n+ ]\n+ left_index_name = left_df.index.names\n+ left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n+\n right_df = right_df.copy(deep=True)\n- right_index_name = right_df.index.name\n- right_df.index = right_df.index.rename(index_right)\n+ try:\n+ right_index_name = right_df.index.name\n+ right_df.index = right_df.index.rename(index_right)\n+ except TypeError:\n+ index_right = [\n+ \"index_%s\" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)\n+ ]\n+ right_index_name = right_df.index.names\n+ right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n \n if op == \"within\":\n@@ -184,7 +199,10 @@\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n- joined.index.name = left_index_name\n+ if isinstance(index_left, list):\n+ joined.index.names = left_index_name\n+ else:\n+ joined.index.name = left_index_name\n \n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n@@ -200,7 +218,10 @@\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n- joined.index.name = left_index_name\n+ if isinstance(index_left, list):\n+ joined.index.names = left_index_name\n+ else:\n+ joined.index.name = left_index_name\n \n else: # how == 'right':\n joined = (\n@@ -216,6 +237,9 @@\n .set_index(index_right)\n .drop([\"_key_left\", \"_key_right\"], axis=1)\n )\n- joined.index.name = right_index_name\n+ if isinstance(index_right, list):\n+ joined.index.names = right_index_name\n+ else:\n+ joined.index.name = right_index_name\n \n return joined\n", "issue": "Spatial join error after using `explode()`\n@jorisvandenbossche There is a specific situation that produces an error with `.sjoin()`. \r\n\r\nI receive a following error: \r\n```\r\n File \"C:\\Hyapp\\Anaconda3\\lib\\site-packages\\pandas\\core\\indexes\\base.py\", line 1330, in set_names\r\n raise TypeError(\"Must pass list-like as `names`.\")\r\n\r\nTypeError: Must pass list-like as `names`.\r\n```\r\n\r\nI was able to narrow this down, and the error only happens after using `df.explode()` function to turn MultiPolygons into multiple Polygons. \r\n\r\nHere is an example to produce the error using some data from OSM: \r\n```\r\n# Test to produce an error\r\nimport osmnx as ox\r\nimport geopandas as gpd\r\nbuildings = ox.footprints_from_place(\"Punavuori, Finland\")\r\nbuildings = buildings.explode()\r\nlanduse = ox.footprints_from_place(\"Punavuori, Finland\", 'landuse')\r\njoin = gpd.sjoin(buildings, landuse)\r\n```\r\n\r\nThe solution for fixing this was to `reset_index()` after using the `.explode()`. I am not sure if this is a bug or expected behavior, but wanted to report my findings anyway. 
\ud83d\ude42 \r\n\n", "before_files": [{"content": "from warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n if left_df.crs != right_df.crs:\n warn(\n (\n \"CRS of frames being joined does not match!\"\n \"(%s != %s)\" % (left_df.crs, right_df.crs)\n )\n )\n\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex\n tree_idx_right = False\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n left_index_name = left_df.index.name\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n right_df = right_df.copy(deep=True)\n right_index_name = right_df.index.name\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = pd.DataFrame(\n np.column_stack(\n [\n l_idx,\n r_idx,\n check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n ),\n ]\n )\n )\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True)\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n joined.index.name = left_index_name\n\n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True, how=\"left\")\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n joined.index.name = left_index_name\n\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, 
left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n how=\"right\",\n )\n .set_index(index_right)\n .drop([\"_key_left\", \"_key_right\"], axis=1)\n )\n joined.index.name = right_index_name\n\n return joined\n", "path": "geopandas/tools/sjoin.py"}]} | 3,181 | 632 |
gh_patches_debug_25584 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-375 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot import captions with double quotes ICDAR Word Recognition
The original ICDAR 13/15 dataset for word recognition task contains captions with double quotes (e.g "READER\\"").
When I try to load this dataset, Datumaro throws an exception with the message:
`Line word_136.png, "(412\")": unexpected number of quotes in filename`.
</issue>
<code>
[start of datumaro/plugins/icdar_format/extractor.py]
1 # Copyright (C) 2020 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 from glob import iglob
6 import os.path as osp
7
8 import numpy as np
9
10 from datumaro.components.extractor import (
11 Bbox, Caption, DatasetItem, Importer, Mask, MaskCategories, Polygon,
12 SourceExtractor,
13 )
14 from datumaro.util.image import find_images
15 from datumaro.util.mask_tools import lazy_mask
16
17 from .format import IcdarPath, IcdarTask
18
19
20 class _IcdarExtractor(SourceExtractor):
21 def __init__(self, path, task, subset=None):
22 self._path = path
23 self._task = task
24
25 if task is IcdarTask.word_recognition:
26 if not osp.isfile(path):
27 raise FileNotFoundError(
28 "Can't read annotation file '%s'" % path)
29
30 if not subset:
31 subset = osp.basename(osp.dirname(path))
32 super().__init__(subset=subset)
33
34 self._dataset_dir = osp.dirname(osp.dirname(path))
35
36 self._items = list(self._load_recognition_items().values())
37 elif task in {IcdarTask.text_localization, IcdarTask.text_segmentation}:
38 if not osp.isdir(path):
39 raise NotADirectoryError(
40 "Can't open folder with annotation files '%s'" % path)
41
42 if not subset:
43 subset = osp.basename(path)
44 super().__init__(subset=subset)
45
46 self._dataset_dir = osp.dirname(path)
47
48 if task is IcdarTask.text_localization:
49 self._items = list(self._load_localization_items().values())
50 else:
51 self._items = list(self._load_segmentation_items().values())
52
53 def _load_recognition_items(self):
54 items = {}
55
56 with open(self._path, encoding='utf-8') as f:
57 for line in f:
58 line = line.strip()
59 objects = line.split(', ')
60 if len(objects) == 2:
61 image = objects[0]
62 objects = objects[1].split('\"')
63 if 1 < len(objects):
64 if len(objects) % 2:
65 captions = [objects[2 * i + 1]
66 for i in range(int(len(objects) / 2))]
67 else:
68 raise Exception("Line %s: unexpected number "
69 "of quotes in filename" % line)
70 else:
71 captions = objects[0].split()
72 else:
73 image = objects[0][:-1]
74 captions = []
75
76 item_id = osp.splitext(image)[0]
77 image_path = osp.join(osp.dirname(self._path),
78 IcdarPath.IMAGES_DIR, image)
79 if item_id not in items:
80 items[item_id] = DatasetItem(item_id, subset=self._subset,
81 image=image_path)
82
83 annotations = items[item_id].annotations
84 for caption in captions:
85 annotations.append(Caption(caption))
86
87 return items
88
89 def _load_localization_items(self):
90 items = {}
91
92 image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
93 if osp.isdir(image_dir):
94 images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
95 for p in find_images(image_dir, recursive=True) }
96 else:
97 images = {}
98
99 for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
100 item_id = osp.splitext(osp.relpath(path, self._path))[0]
101 if osp.basename(item_id).startswith('gt_'):
102 item_id = osp.join(osp.dirname(item_id), osp.basename(item_id)[3:])
103 item_id = item_id.replace('\\', '/')
104
105 if item_id not in items:
106 items[item_id] = DatasetItem(item_id, subset=self._subset,
107 image=images.get(item_id))
108 annotations = items[item_id].annotations
109
110 with open(path, encoding='utf-8') as f:
111 for line in f:
112 line = line.strip()
113 objects = line.split('\"')
114 if 1 < len(objects):
115 if len(objects) == 3:
116 text = objects[1]
117 else:
118 raise Exception("Line %s: unexpected number "
119 "of quotes in filename" % line)
120 else:
121 text = ''
122 objects = objects[0].split()
123 if len(objects) == 1:
124 objects = objects[0].split(',')
125
126 if 8 <= len(objects):
127 points = [float(p) for p in objects[:8]]
128
129 attributes = {}
130 if 0 < len(text):
131 attributes['text'] = text
132 elif len(objects) == 9:
133 text = objects[8]
134 attributes['text'] = text
135
136 annotations.append(
137 Polygon(points, attributes=attributes))
138 elif 4 <= len(objects):
139 x = float(objects[0])
140 y = float(objects[1])
141 w = float(objects[2]) - x
142 h = float(objects[3]) - y
143
144 attributes = {}
145 if 0 < len(text):
146 attributes['text'] = text
147 elif len(objects) == 5:
148 text = objects[4]
149 attributes['text'] = text
150
151 annotations.append(
152 Bbox(x, y, w, h, attributes=attributes))
153 return items
154
155 def _load_segmentation_items(self):
156 items = {}
157
158 image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
159 if osp.isdir(image_dir):
160 images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
161 for p in find_images(image_dir, recursive=True) }
162 else:
163 images = {}
164
165 for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
166 item_id = osp.splitext(osp.relpath(path, self._path))[0]
167 item_id = item_id.replace('\\', '/')
168 if item_id.endswith('_GT'):
169 item_id = item_id[:-3]
170
171 if item_id not in items:
172 items[item_id] = DatasetItem(item_id, subset=self._subset,
173 image=images.get(item_id))
174 annotations = items[item_id].annotations
175
176 colors = [(255, 255, 255)]
177 chars = ['']
178 centers = [0]
179 groups = [0]
180 group = 1
181 number_in_group = 0
182 with open(path, encoding='utf-8') as f:
183 for line in f:
184 line = line.strip()
185 if line == '':
186 if number_in_group == 1:
187 groups[len(groups) - 1] = 0
188 else:
189 group += 1
190 number_in_group = 0
191 continue
192
193 objects = line.split()
194 if objects[0][0] == '#':
195 objects[0] = objects[0][1:]
196 objects[9] = '\" \"'
197 objects.pop()
198 if len(objects) != 10:
199 raise Exception("Line %s contains the wrong number "
200 "of arguments, e.g. '241 73 144 1 4 0 3 1 4 \"h\"" % line)
201
202 centers.append(objects[3] + ' ' + objects[4])
203 groups.append(group)
204 colors.append(tuple(int(o) for o in objects[:3]))
205 char = objects[9]
206 if char[0] == '\"' and char[-1] == '\"':
207 char = char[1:-1]
208 chars.append(char)
209 number_in_group += 1
210 if number_in_group == 1:
211 groups[len(groups) - 1] = 0
212
213 mask_categories = MaskCategories(
214 {i: colors[i] for i in range(len(colors))})
215 inverse_cls_colormap = mask_categories.inverse_colormap
216
217 gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)
218 if osp.isfile(gt_path):
219 # load mask through cache
220 mask = lazy_mask(gt_path, inverse_cls_colormap)
221 mask = mask()
222
223 classes = np.unique(mask)
224 for label_id in classes:
225 if label_id == 0:
226 continue
227 i = int(label_id)
228 annotations.append(Mask(group=groups[i],
229 image=self._lazy_extract_mask(mask, label_id),
230 attributes={ 'index': i - 1,
231 'color': ' '.join(str(p) for p in colors[i]),
232 'text': chars[i], 'center': centers[i] }
233 ))
234 return items
235
236 @staticmethod
237 def _lazy_extract_mask(mask, c):
238 return lambda: mask == c
239
240 class IcdarWordRecognitionExtractor(_IcdarExtractor):
241 def __init__(self, path, **kwargs):
242 kwargs['task'] = IcdarTask.word_recognition
243 super().__init__(path, **kwargs)
244
245 class IcdarTextLocalizationExtractor(_IcdarExtractor):
246 def __init__(self, path, **kwargs):
247 kwargs['task'] = IcdarTask.text_localization
248 super().__init__(path, **kwargs)
249
250 class IcdarTextSegmentationExtractor(_IcdarExtractor):
251 def __init__(self, path, **kwargs):
252 kwargs['task'] = IcdarTask.text_segmentation
253 super().__init__(path, **kwargs)
254
255
256 class IcdarWordRecognitionImporter(Importer):
257 @classmethod
258 def find_sources(cls, path):
259 return cls._find_sources_recursive(path, '.txt', 'icdar_word_recognition')
260
261 class IcdarTextLocalizationImporter(Importer):
262 @classmethod
263 def find_sources(cls, path):
264 return cls._find_sources_recursive(path, '', 'icdar_text_localization')
265
266 class IcdarTextSegmentationImporter(Importer):
267 @classmethod
268 def find_sources(cls, path):
269 return cls._find_sources_recursive(path, '', 'icdar_text_segmentation')
270
[end of datumaro/plugins/icdar_format/extractor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/datumaro/plugins/icdar_format/extractor.py b/datumaro/plugins/icdar_format/extractor.py
--- a/datumaro/plugins/icdar_format/extractor.py
+++ b/datumaro/plugins/icdar_format/extractor.py
@@ -3,6 +3,7 @@
# SPDX-License-Identifier: MIT
from glob import iglob
+import logging as log
import os.path as osp
import numpy as np
@@ -59,16 +60,13 @@
objects = line.split(', ')
if len(objects) == 2:
image = objects[0]
- objects = objects[1].split('\"')
- if 1 < len(objects):
- if len(objects) % 2:
- captions = [objects[2 * i + 1]
- for i in range(int(len(objects) / 2))]
+ captions = []
+ for caption in objects[1:]:
+ if caption[0] != '\"' or caption[-1] != '\"':
+ log.warning("Line %s: unexpected number "
+ "of quotes" % line)
else:
- raise Exception("Line %s: unexpected number "
- "of quotes in filename" % line)
- else:
- captions = objects[0].split()
+ captions.append(caption.replace('\\', '')[1:-1])
else:
image = objects[0][:-1]
captions = []
| {"golden_diff": "diff --git a/datumaro/plugins/icdar_format/extractor.py b/datumaro/plugins/icdar_format/extractor.py\n--- a/datumaro/plugins/icdar_format/extractor.py\n+++ b/datumaro/plugins/icdar_format/extractor.py\n@@ -3,6 +3,7 @@\n # SPDX-License-Identifier: MIT\n \n from glob import iglob\n+import logging as log\n import os.path as osp\n \n import numpy as np\n@@ -59,16 +60,13 @@\n objects = line.split(', ')\n if len(objects) == 2:\n image = objects[0]\n- objects = objects[1].split('\\\"')\n- if 1 < len(objects):\n- if len(objects) % 2:\n- captions = [objects[2 * i + 1]\n- for i in range(int(len(objects) / 2))]\n+ captions = []\n+ for caption in objects[1:]:\n+ if caption[0] != '\\\"' or caption[-1] != '\\\"':\n+ log.warning(\"Line %s: unexpected number \"\n+ \"of quotes\" % line)\n else:\n- raise Exception(\"Line %s: unexpected number \"\n- \"of quotes in filename\" % line)\n- else:\n- captions = objects[0].split()\n+ captions.append(caption.replace('\\\\', '')[1:-1])\n else:\n image = objects[0][:-1]\n captions = []\n", "issue": "Cannot import captions with double quotes ICDAR Word Recognition\nThe original ICDAR 13/15 dataset for word recognition task contains captions with double quotes (e.g \"READER\\\\\"\").\r\nWhen I try to load this dataset, Datumaro throws an exception with the message:\r\n`Line word_136.png, \"(412\\\")\": unexpected number of quotes in filename`.\n", "before_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom glob import iglob\nimport os.path as osp\n\nimport numpy as np\n\nfrom datumaro.components.extractor import (\n Bbox, Caption, DatasetItem, Importer, Mask, MaskCategories, Polygon,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\nfrom datumaro.util.mask_tools import lazy_mask\n\nfrom .format import IcdarPath, IcdarTask\n\n\nclass _IcdarExtractor(SourceExtractor):\n def __init__(self, path, task, subset=None):\n self._path = path\n self._task = task\n\n if task is IcdarTask.word_recognition:\n if not osp.isfile(path):\n raise FileNotFoundError(\n \"Can't read annotation file '%s'\" % path)\n\n if not subset:\n subset = osp.basename(osp.dirname(path))\n super().__init__(subset=subset)\n\n self._dataset_dir = osp.dirname(osp.dirname(path))\n\n self._items = list(self._load_recognition_items().values())\n elif task in {IcdarTask.text_localization, IcdarTask.text_segmentation}:\n if not osp.isdir(path):\n raise NotADirectoryError(\n \"Can't open folder with annotation files '%s'\" % path)\n\n if not subset:\n subset = osp.basename(path)\n super().__init__(subset=subset)\n\n self._dataset_dir = osp.dirname(path)\n\n if task is IcdarTask.text_localization:\n self._items = list(self._load_localization_items().values())\n else:\n self._items = list(self._load_segmentation_items().values())\n\n def _load_recognition_items(self):\n items = {}\n\n with open(self._path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n objects = line.split(', ')\n if len(objects) == 2:\n image = objects[0]\n objects = objects[1].split('\\\"')\n if 1 < len(objects):\n if len(objects) % 2:\n captions = [objects[2 * i + 1]\n for i in range(int(len(objects) / 2))]\n else:\n raise Exception(\"Line %s: unexpected number \"\n \"of quotes in filename\" % line)\n else:\n captions = objects[0].split()\n else:\n image = objects[0][:-1]\n captions = []\n\n item_id = osp.splitext(image)[0]\n image_path = osp.join(osp.dirname(self._path),\n IcdarPath.IMAGES_DIR, image)\n if item_id not in items:\n 
items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=image_path)\n\n annotations = items[item_id].annotations\n for caption in captions:\n annotations.append(Caption(caption))\n\n return items\n\n def _load_localization_items(self):\n items = {}\n\n image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)\n if osp.isdir(image_dir):\n images = { osp.splitext(osp.relpath(p, image_dir))[0]: p\n for p in find_images(image_dir, recursive=True) }\n else:\n images = {}\n\n for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):\n item_id = osp.splitext(osp.relpath(path, self._path))[0]\n if osp.basename(item_id).startswith('gt_'):\n item_id = osp.join(osp.dirname(item_id), osp.basename(item_id)[3:])\n item_id = item_id.replace('\\\\', '/')\n\n if item_id not in items:\n items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=images.get(item_id))\n annotations = items[item_id].annotations\n\n with open(path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n objects = line.split('\\\"')\n if 1 < len(objects):\n if len(objects) == 3:\n text = objects[1]\n else:\n raise Exception(\"Line %s: unexpected number \"\n \"of quotes in filename\" % line)\n else:\n text = ''\n objects = objects[0].split()\n if len(objects) == 1:\n objects = objects[0].split(',')\n\n if 8 <= len(objects):\n points = [float(p) for p in objects[:8]]\n\n attributes = {}\n if 0 < len(text):\n attributes['text'] = text\n elif len(objects) == 9:\n text = objects[8]\n attributes['text'] = text\n\n annotations.append(\n Polygon(points, attributes=attributes))\n elif 4 <= len(objects):\n x = float(objects[0])\n y = float(objects[1])\n w = float(objects[2]) - x\n h = float(objects[3]) - y\n\n attributes = {}\n if 0 < len(text):\n attributes['text'] = text\n elif len(objects) == 5:\n text = objects[4]\n attributes['text'] = text\n\n annotations.append(\n Bbox(x, y, w, h, attributes=attributes))\n return items\n\n def _load_segmentation_items(self):\n items = {}\n\n image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)\n if osp.isdir(image_dir):\n images = { osp.splitext(osp.relpath(p, image_dir))[0]: p\n for p in find_images(image_dir, recursive=True) }\n else:\n images = {}\n\n for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):\n item_id = osp.splitext(osp.relpath(path, self._path))[0]\n item_id = item_id.replace('\\\\', '/')\n if item_id.endswith('_GT'):\n item_id = item_id[:-3]\n\n if item_id not in items:\n items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=images.get(item_id))\n annotations = items[item_id].annotations\n\n colors = [(255, 255, 255)]\n chars = ['']\n centers = [0]\n groups = [0]\n group = 1\n number_in_group = 0\n with open(path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line == '':\n if number_in_group == 1:\n groups[len(groups) - 1] = 0\n else:\n group += 1\n number_in_group = 0\n continue\n\n objects = line.split()\n if objects[0][0] == '#':\n objects[0] = objects[0][1:]\n objects[9] = '\\\" \\\"'\n objects.pop()\n if len(objects) != 10:\n raise Exception(\"Line %s contains the wrong number \"\n \"of arguments, e.g. 
'241 73 144 1 4 0 3 1 4 \\\"h\\\"\" % line)\n\n centers.append(objects[3] + ' ' + objects[4])\n groups.append(group)\n colors.append(tuple(int(o) for o in objects[:3]))\n char = objects[9]\n if char[0] == '\\\"' and char[-1] == '\\\"':\n char = char[1:-1]\n chars.append(char)\n number_in_group += 1\n if number_in_group == 1:\n groups[len(groups) - 1] = 0\n\n mask_categories = MaskCategories(\n {i: colors[i] for i in range(len(colors))})\n inverse_cls_colormap = mask_categories.inverse_colormap\n\n gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)\n if osp.isfile(gt_path):\n # load mask through cache\n mask = lazy_mask(gt_path, inverse_cls_colormap)\n mask = mask()\n\n classes = np.unique(mask)\n for label_id in classes:\n if label_id == 0:\n continue\n i = int(label_id)\n annotations.append(Mask(group=groups[i],\n image=self._lazy_extract_mask(mask, label_id),\n attributes={ 'index': i - 1,\n 'color': ' '.join(str(p) for p in colors[i]),\n 'text': chars[i], 'center': centers[i] }\n ))\n return items\n\n @staticmethod\n def _lazy_extract_mask(mask, c):\n return lambda: mask == c\n\nclass IcdarWordRecognitionExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.word_recognition\n super().__init__(path, **kwargs)\n\nclass IcdarTextLocalizationExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.text_localization\n super().__init__(path, **kwargs)\n\nclass IcdarTextSegmentationExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.text_segmentation\n super().__init__(path, **kwargs)\n\n\nclass IcdarWordRecognitionImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '.txt', 'icdar_word_recognition')\n\nclass IcdarTextLocalizationImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '', 'icdar_text_localization')\n\nclass IcdarTextSegmentationImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '', 'icdar_text_segmentation')\n", "path": "datumaro/plugins/icdar_format/extractor.py"}]} | 3,486 | 317 |
gh_patches_debug_23513 | rasdani/github-patches | git_diff | pyca__cryptography-2385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.6 incompatibility because of getargspec
Hi, on testing pywikibot on 3.6 I noticed that your package uses `inspect.getargspec` which has been removed in 3.6 after it has been deprecated in 3.5 (although the documentation mentions it has been deprecated since Python 3).
I know that Python 3.6 hasn't been released yet but I just wanted to let you know so you have enough time to prevent issues before it is released.
Here the Travis build: https://travis-ci.org/xZise/pywikibot-core/jobs/82446934
</issue>
<code>
[start of src/cryptography/utils.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8 import binascii
9 import inspect
10 import struct
11 import sys
12 import warnings
13
14
15 DeprecatedIn09 = DeprecationWarning
16 DeprecatedIn10 = PendingDeprecationWarning
17
18
19 def read_only_property(name):
20 return property(lambda self: getattr(self, name))
21
22
23 def register_interface(iface):
24 def register_decorator(klass):
25 verify_interface(iface, klass)
26 iface.register(klass)
27 return klass
28 return register_decorator
29
30
31 if hasattr(int, "from_bytes"):
32 int_from_bytes = int.from_bytes
33 else:
34 def int_from_bytes(data, byteorder, signed=False):
35 assert byteorder == 'big'
36 assert not signed
37
38 if len(data) % 4 != 0:
39 data = (b'\x00' * (4 - (len(data) % 4))) + data
40
41 result = 0
42
43 while len(data) > 0:
44 digit, = struct.unpack('>I', data[:4])
45 result = (result << 32) + digit
46 data = data[4:]
47
48 return result
49
50
51 def int_to_bytes(integer):
52 hex_string = '%x' % integer
53 n = len(hex_string)
54 return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
55
56
57 class InterfaceNotImplemented(Exception):
58 pass
59
60
61 def verify_interface(iface, klass):
62 for method in iface.__abstractmethods__:
63 if not hasattr(klass, method):
64 raise InterfaceNotImplemented(
65 "{0} is missing a {1!r} method".format(klass, method)
66 )
67 if isinstance(getattr(iface, method), abc.abstractproperty):
68 # Can't properly verify these yet.
69 continue
70 spec = inspect.getargspec(getattr(iface, method))
71 actual = inspect.getargspec(getattr(klass, method))
72 if spec != actual:
73 raise InterfaceNotImplemented(
74 "{0}.{1}'s signature differs from the expected. Expected: "
75 "{2!r}. Received: {3!r}".format(
76 klass, method, spec, actual
77 )
78 )
79
80
81 if sys.version_info >= (2, 7):
82 def bit_length(x):
83 return x.bit_length()
84 else:
85 def bit_length(x):
86 return len(bin(x)) - (2 + (x <= 0))
87
88
89 class _DeprecatedValue(object):
90 def __init__(self, value, message, warning_class):
91 self.value = value
92 self.message = message
93 self.warning_class = warning_class
94
95
96 class _ModuleWithDeprecations(object):
97 def __init__(self, module):
98 self.__dict__["_module"] = module
99
100 def __getattr__(self, attr):
101 obj = getattr(self._module, attr)
102 if isinstance(obj, _DeprecatedValue):
103 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
104 obj = obj.value
105 return obj
106
107 def __setattr__(self, attr, value):
108 setattr(self._module, attr, value)
109
110 def __dir__(self):
111 return ["_module"] + dir(self._module)
112
113
114 def deprecated(value, module_name, message, warning_class):
115 module = sys.modules[module_name]
116 if not isinstance(module, _ModuleWithDeprecations):
117 sys.modules[module_name] = module = _ModuleWithDeprecations(module)
118 return _DeprecatedValue(value, message, warning_class)
119
[end of src/cryptography/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -58,6 +58,12 @@
pass
+if hasattr(inspect, "signature"):
+ signature = inspect.signature
+else:
+ signature = inspect.getargspec
+
+
def verify_interface(iface, klass):
for method in iface.__abstractmethods__:
if not hasattr(klass, method):
@@ -67,13 +73,13 @@
if isinstance(getattr(iface, method), abc.abstractproperty):
# Can't properly verify these yet.
continue
- spec = inspect.getargspec(getattr(iface, method))
- actual = inspect.getargspec(getattr(klass, method))
- if spec != actual:
+ sig = signature(getattr(iface, method))
+ actual = signature(getattr(klass, method))
+ if sig != actual:
raise InterfaceNotImplemented(
"{0}.{1}'s signature differs from the expected. Expected: "
"{2!r}. Received: {3!r}".format(
- klass, method, spec, actual
+ klass, method, sig, actual
)
)
| {"golden_diff": "diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -58,6 +58,12 @@\n pass\n \n \n+if hasattr(inspect, \"signature\"):\n+ signature = inspect.signature\n+else:\n+ signature = inspect.getargspec\n+\n+\n def verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n@@ -67,13 +73,13 @@\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n- spec = inspect.getargspec(getattr(iface, method))\n- actual = inspect.getargspec(getattr(klass, method))\n- if spec != actual:\n+ sig = signature(getattr(iface, method))\n+ actual = signature(getattr(klass, method))\n+ if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. Received: {3!r}\".format(\n- klass, method, spec, actual\n+ klass, method, sig, actual\n )\n )\n", "issue": "Python 3.6 incompatibility because of getargspec\nHi, on testing pywikibot on 3.6 I noticed that your package uses `inspect.getargspec` which has been removed in 3.6 after it has been deprecated in 3.5 (although the documentation mentions it has been deprecated since Python 3).\n\nI know that Python 3.6 hasn't been released yet but I just wanted to let you know so you have enough time to prevent issues before it is released.\n\nHere the Travis build: https://travis-ci.org/xZise/pywikibot-core/jobs/82446934\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\nDeprecatedIn09 = DeprecationWarning\nDeprecatedIn10 = PendingDeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer):\n hex_string = '%x' % integer\n n = len(hex_string)\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n spec = inspect.getargspec(getattr(iface, method))\n actual = inspect.getargspec(getattr(klass, method))\n if spec != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, spec, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}]} | 1,719 | 272 |
gh_patches_debug_2763 | rasdani/github-patches | git_diff | google__timesketch-949 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Yeti analyzer crash
From the celery log:
```
[2019-07-17 09:11:37,463: ERROR/ForkPoolWorker-1] Task timesketch.lib.tasks.run_sketch_analyzer[46d24990-12df-4c88-a79b-a3b5f1c04b01] raised unexpected: TypeError("unsupported operand type(s) for +: 'NoneType' and 'unicode'",)
Traceback (most recent call last):
File "/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py", line 374, in trace_task
R = retval = fun(*args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/__init__.py", line 181, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py", line 629, in __protected_call__
return self.run(*args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/tasks.py", line 339, in run_sketch_analyzer
result = analyzer.run_wrapper()
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py", line 37, in wrapper
func_return = func(self, *args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py", line 403, in run_wrapper
result = self.run()
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py", line 92, in run
self.get_intrusion_sets()
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py", line 75, in get_intrusion_sets
self.yeti_api_root + '/entities/filter/',
TypeError: unsupported operand type(s) for +: 'NoneType' and 'unicode'
```
</issue>
<code>
[start of timesketch/lib/analyzers/yetiindicators.py]
1 """Index analyzer plugin for Yeti indicators."""
2 from __future__ import unicode_literals
3
4 from flask import current_app
5 import requests
6
7 from timesketch.lib.analyzers import interface
8 from timesketch.lib.analyzers import manager
9 from timesketch.lib import emojis
10
11
12 def build_query_for_indicators(indicators):
13 """Builds an Elasticsearch query for Yeti indicator patterns.
14
15 Prepends and appends .* to the regex to be able to search within a field.
16
17 Returns:
18 The resulting ES query string.
19 """
20 query = []
21 for domain in indicators:
22 query.append('domain:/.*{0:s}.*/'.format(domain['pattern']))
23 return ' OR '.join(query)
24
25
26 class YetiIndicators(interface.BaseSketchAnalyzer):
27 """Index analyzer for Yeti threat intel indicators."""
28
29 NAME = 'yetiindicators'
30 DEPENDENCIES = frozenset(['domain'])
31
32 def __init__(self, index_name, sketch_id):
33 """Initialize the Index Analyzer.
34
35 Args:
36 index_name: Elasticsearch index name
37 """
38 super(YetiIndicators, self).__init__(index_name, sketch_id)
39 self.intel = {}
40 self.yeti_api_root = current_app.config.get('YETI_API_ROOT')
41 self.yeti_api_key = current_app.config.get('YETI_API_KEY')
42 self.yeti_indicator_labels = current_app.config.get(
43 'YETI_INDICATOR_LABELS', [])
44
45 def get_bad_domain_indicators(self, entity_id):
46 """Retrieves a list of indicators associated to a given entity.
47
48 Args:
49 entity_id (str): STIX ID of the entity to get associated inticators
50 from. (typically an Intrusion Set)
51
52 Returns:
53 A list of JSON objects describing a Yeti Indicator.
54 """
55 results = requests.post(
56 self.yeti_api_root + '/entities/{0:s}/neighbors/'.format(entity_id),
57 headers={'X-Yeti-API': self.yeti_api_key},
58 )
59 if results.status_code != 200:
60 return []
61 domain_indicators = []
62 for neighbor in results.json().get('vertices', {}).values():
63 if neighbor['type'] == 'x-regex' and \
64 set(self.yeti_indicator_labels) <= set(neighbor['labels']):
65 domain_indicators.append(neighbor)
66
67 return domain_indicators
68
69 def get_intrusion_sets(self):
70 """Populates the intel attribute with data from Yeti.
71
72 Retrieved intel consists of Intrusion sets and associated Indicators.
73 """
74 results = requests.post(
75 self.yeti_api_root + '/entities/filter/',
76 json={'name': '', 'type': 'intrusion-set'},
77 headers={'X-Yeti-API': self.yeti_api_key},
78 )
79 if results.status_code != 200:
80 return
81 self.intel = {item['id']: item for item in results.json()}
82 for _id in self.intel:
83 self.intel[_id]['indicators'] = self.get_bad_domain_indicators(_id)
84
85 def run(self):
86 """Entry point for the analyzer.
87
88 Returns:
89 String with summary of the analyzer result
90 """
91
92 self.get_intrusion_sets()
93 actors_found = []
94 for intrusion_set in self.intel.values():
95 if not intrusion_set['indicators']:
96 continue
97
98 found = False
99
100 for indicator in intrusion_set['indicators']:
101 query = build_query_for_indicators([indicator])
102
103 events = self.event_stream(query_string=query,
104 return_fields=[])
105
106 name = intrusion_set['name']
107 for event in events:
108 found = True
109 event.add_emojis([emojis.get_emoji('SKULL')])
110 event.add_tags([name])
111 event.commit()
112 event.add_comment(
113 'Indicator "{0:s}" found for actor "{1:s}"'.format(
114 indicator['name'], name))
115
116 if found:
117 actors_found.append(name)
118 self.sketch.add_view(
119 'Domain activity for actor {0:s}'.format(name),
120 self.NAME,
121 query_string=query)
122
123 if actors_found:
124 return '{0:d} actors were found! [{1:s}]'.format(
125 len(actors_found), ', '.join(actors_found))
126 return 'No indicators were found in the timeline.'
127
128
129 manager.AnalysisManager.register_analyzer(YetiIndicators)
130
[end of timesketch/lib/analyzers/yetiindicators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/timesketch/lib/analyzers/yetiindicators.py b/timesketch/lib/analyzers/yetiindicators.py
--- a/timesketch/lib/analyzers/yetiindicators.py
+++ b/timesketch/lib/analyzers/yetiindicators.py
@@ -88,6 +88,8 @@
Returns:
String with summary of the analyzer result
"""
+ if not self.yeti_api_root or not self.yeti_api_key:
+ return 'No Yeti configuration settings found, aborting.'
self.get_intrusion_sets()
actors_found = []
| {"golden_diff": "diff --git a/timesketch/lib/analyzers/yetiindicators.py b/timesketch/lib/analyzers/yetiindicators.py\n--- a/timesketch/lib/analyzers/yetiindicators.py\n+++ b/timesketch/lib/analyzers/yetiindicators.py\n@@ -88,6 +88,8 @@\n Returns:\n String with summary of the analyzer result\n \"\"\"\n+ if not self.yeti_api_root or not self.yeti_api_key:\n+ return 'No Yeti configuration settings found, aborting.'\n \n self.get_intrusion_sets()\n actors_found = []\n", "issue": "Yeti analyzer crash\nFrom the celery log:\r\n\r\n```\r\n[2019-07-17 09:11:37,463: ERROR/ForkPoolWorker-1] Task timesketch.lib.tasks.run_sketch_analyzer[46d24990-12df-4c88-a79b-a3b5f1c04b01] raised unexpected: TypeError(\"unsupported operand type(s) for +: 'NoneType' and 'unicode'\",)\r\nTraceback (most recent call last):\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py\", line 374, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/__init__.py\", line 181, in __call__\r\n return TaskBase.__call__(self, *args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py\", line 629, in __protected_call__\r\n return self.run(*args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/tasks.py\", line 339, in run_sketch_analyzer\r\n result = analyzer.run_wrapper()\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py\", line 37, in wrapper\r\n func_return = func(self, *args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py\", line 403, in run_wrapper\r\n result = self.run()\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py\", line 92, in run\r\n self.get_intrusion_sets()\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py\", line 75, in get_intrusion_sets\r\n self.yeti_api_root + '/entities/filter/',\r\nTypeError: unsupported operand type(s) for +: 'NoneType' and 'unicode'\r\n```\n", "before_files": [{"content": "\"\"\"Index analyzer plugin for Yeti indicators.\"\"\"\nfrom __future__ import unicode_literals\n\nfrom flask import current_app\nimport requests\n\nfrom timesketch.lib.analyzers import interface\nfrom timesketch.lib.analyzers import manager\nfrom timesketch.lib import emojis\n\n\ndef build_query_for_indicators(indicators):\n \"\"\"Builds an Elasticsearch query for Yeti indicator patterns.\n\n Prepends and appends .* to the regex to be able to search within a field.\n\n Returns:\n The resulting ES query string.\n \"\"\"\n query = []\n for domain in indicators:\n query.append('domain:/.*{0:s}.*/'.format(domain['pattern']))\n return ' OR '.join(query)\n\n\nclass YetiIndicators(interface.BaseSketchAnalyzer):\n \"\"\"Index analyzer for Yeti threat intel indicators.\"\"\"\n\n NAME = 'yetiindicators'\n DEPENDENCIES = frozenset(['domain'])\n\n def __init__(self, index_name, sketch_id):\n \"\"\"Initialize the Index Analyzer.\n\n Args:\n index_name: Elasticsearch index name\n \"\"\"\n super(YetiIndicators, self).__init__(index_name, sketch_id)\n self.intel = {}\n self.yeti_api_root = current_app.config.get('YETI_API_ROOT')\n self.yeti_api_key = current_app.config.get('YETI_API_KEY')\n self.yeti_indicator_labels = current_app.config.get(\n 'YETI_INDICATOR_LABELS', [])\n\n def get_bad_domain_indicators(self, 
entity_id):\n \"\"\"Retrieves a list of indicators associated to a given entity.\n\n Args:\n entity_id (str): STIX ID of the entity to get associated inticators\n from. (typically an Intrusion Set)\n\n Returns:\n A list of JSON objects describing a Yeti Indicator.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/{0:s}/neighbors/'.format(entity_id),\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return []\n domain_indicators = []\n for neighbor in results.json().get('vertices', {}).values():\n if neighbor['type'] == 'x-regex' and \\\n set(self.yeti_indicator_labels) <= set(neighbor['labels']):\n domain_indicators.append(neighbor)\n\n return domain_indicators\n\n def get_intrusion_sets(self):\n \"\"\"Populates the intel attribute with data from Yeti.\n\n Retrieved intel consists of Intrusion sets and associated Indicators.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/filter/',\n json={'name': '', 'type': 'intrusion-set'},\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return\n self.intel = {item['id']: item for item in results.json()}\n for _id in self.intel:\n self.intel[_id]['indicators'] = self.get_bad_domain_indicators(_id)\n\n def run(self):\n \"\"\"Entry point for the analyzer.\n\n Returns:\n String with summary of the analyzer result\n \"\"\"\n\n self.get_intrusion_sets()\n actors_found = []\n for intrusion_set in self.intel.values():\n if not intrusion_set['indicators']:\n continue\n\n found = False\n\n for indicator in intrusion_set['indicators']:\n query = build_query_for_indicators([indicator])\n\n events = self.event_stream(query_string=query,\n return_fields=[])\n\n name = intrusion_set['name']\n for event in events:\n found = True\n event.add_emojis([emojis.get_emoji('SKULL')])\n event.add_tags([name])\n event.commit()\n event.add_comment(\n 'Indicator \"{0:s}\" found for actor \"{1:s}\"'.format(\n indicator['name'], name))\n\n if found:\n actors_found.append(name)\n self.sketch.add_view(\n 'Domain activity for actor {0:s}'.format(name),\n self.NAME,\n query_string=query)\n\n if actors_found:\n return '{0:d} actors were found! [{1:s}]'.format(\n len(actors_found), ', '.join(actors_found))\n return 'No indicators were found in the timeline.'\n\n\nmanager.AnalysisManager.register_analyzer(YetiIndicators)\n", "path": "timesketch/lib/analyzers/yetiindicators.py"}]} | 2,296 | 134 |
gh_patches_debug_492 | rasdani/github-patches | git_diff | pytorch__ignite-506 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug with ProgressBar with TerminateOnNan
If we attach `ProgressBar` and `TerminateOnNaN` handlers to a trainer and `TerminateOnNan` stops training on the first iteration. We have the following error:
```python
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-36-b4ac10e6ccc4> in <module>
----> 1 trainer.run(train_ab_loader, max_epochs=200)
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)
357 except BaseException as e:
358 self._logger.error("Engine run is terminating due to exception: %s.", str(e))
--> 359 self._handle_exception(e)
360
361 return self.state
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _handle_exception(self, e)
322 self._fire_event(Events.EXCEPTION_RAISED, e)
323 else:
--> 324 raise e
325
326 def run(self, data, max_epochs=1):
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)
350 self._fire_event(Events.EPOCH_COMPLETED)
351
--> 352 self._fire_event(Events.COMPLETED)
353 time_taken = time.time() - start_time
354 hours, mins, secs = _to_hours_mins_secs(time_taken)
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _fire_event(self, event_name, *event_args, **event_kwargs)
257 for func, args, kwargs in self._event_handlers[event_name]:
258 kwargs.update(event_kwargs)
--> 259 func(self, *(event_args + args), **kwargs)
260
261 def fire_event(self, event_name):
/opt/conda/lib/python3.7/site-packages/ignite/contrib/handlers/tqdm_logger.py in _close(self, engine)
115
116 def _close(self, engine):
--> 117 self.pbar.close()
118 self.pbar = None
119
AttributeError: 'NoneType' object has no attribute 'close'
```
</issue>
<code>
[start of ignite/contrib/handlers/tqdm_logger.py]
1 # -*- coding: utf-8 -*-
2 import numbers
3 import warnings
4
5 import torch
6
7 from ignite.engine import Events
8
9 from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
10
11
12 class ProgressBar(BaseLogger):
13 """
14 TQDM progress bar handler to log training progress and computed metrics.
15
16 Args:
17 persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)
18 bar_format (str, optional): Specify a custom bar string formatting. May impact performance.
19 [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
20 Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
21 l_bar='{desc}: {percentage:3.0f}%|' and
22 r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
23 formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
24 **tqdm_kwargs: kwargs passed to tqdm progress bar.
25 By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
26 number of epochs. If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
27 "Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions".
28
29 Examples:
30
31 Simple progress bar
32
33 .. code-block:: python
34
35 trainer = create_supervised_trainer(model, optimizer, loss)
36
37 pbar = ProgressBar()
38 pbar.attach(trainer)
39
40 # Progress bar will looks like
41 # Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
42
43 Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`
44 (such as :class:`~ignite.metrics.RunningAverage`)
45
46 .. code-block:: python
47
48 trainer = create_supervised_trainer(model, optimizer, loss)
49
50 RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
51
52 pbar = ProgressBar()
53 pbar.attach(trainer, ['loss'])
54
55 # Progress bar will looks like
56 # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
57
58 Directly attach the engine's output
59
60 .. code-block:: python
61
62 trainer = create_supervised_trainer(model, optimizer, loss)
63
64 pbar = ProgressBar()
65 pbar.attach(trainer, output_transform=lambda x: {'loss': x})
66
67 # Progress bar will looks like
68 # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
69
70 Note:
71 When adding attaching the progress bar to an engine, it is recommend that you replace
72 every print operation in the engine's handlers triggered every iteration with
73 ``pbar.log_message`` to guarantee the correct format of the stdout.
74
75 Note:
76 When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
77 please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
78 Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set
79 to an empty string value.
80
81 """
82
83 events_order = [
84 Events.STARTED,
85 Events.EPOCH_STARTED,
86 Events.ITERATION_STARTED,
87 Events.ITERATION_COMPLETED,
88 Events.EPOCH_COMPLETED,
89 Events.COMPLETED
90 ]
91
92 def __init__(self, persist=False,
93 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',
94 **tqdm_kwargs):
95
96 try:
97 from tqdm.autonotebook import tqdm
98 except ImportError:
99 raise RuntimeError("This contrib module requires tqdm to be installed. "
100 "Please install it with command: \n pip install tqdm")
101
102 self.pbar_cls = tqdm
103 self.pbar = None
104 self.persist = persist
105 self.bar_format = bar_format
106 self.tqdm_kwargs = tqdm_kwargs
107
108 def _reset(self, pbar_total):
109 self.pbar = self.pbar_cls(
110 total=pbar_total,
111 leave=self.persist,
112 bar_format=self.bar_format,
113 **self.tqdm_kwargs
114 )
115
116 def _close(self, engine):
117 self.pbar.close()
118 self.pbar = None
119
120 @staticmethod
121 def _compare_lt(event1, event2):
122 i1 = ProgressBar.events_order.index(event1)
123 i2 = ProgressBar.events_order.index(event2)
124 return i1 < i2
125
126 @staticmethod
127 def log_message(message):
128 """
129 Logs a message, preserving the progress bar correct output format.
130
131 Args:
132 message (str): string you wish to log.
133 """
134 from tqdm import tqdm
135 tqdm.write(message)
136
137 def attach(self, engine, metric_names=None, output_transform=None,
138 event_name=Events.ITERATION_COMPLETED,
139 closing_event_name=Events.EPOCH_COMPLETED):
140 """
141 Attaches the progress bar to an engine object.
142
143 Args:
144 engine (Engine): engine object.
145 metric_names (list, optional): list of the metrics names to log as the bar progresses
146 output_transform (callable, optional): a function to select what you want to print from the engine's
147 output. This function may return either a dictionary with entries in the format of ``{name: value}``,
148 or a single scalar, which will be displayed with the default name `output`.
149 event_name: event's name on which the progress bar advances. Valid events are from
150 :class:`~ignite.engine.Events`.
151 closing_event_name: event's name on which the progress bar is closed. Valid events are from
152 :class:`~ignite.engine.Events`.
153 """
154 desc = self.tqdm_kwargs.get("desc", "Epoch")
155
156 if not (event_name in Events and closing_event_name in Events):
157 raise ValueError("Logging and closing events should be only ignite.engine.Events")
158
159 if not self._compare_lt(event_name, closing_event_name):
160 raise ValueError("Logging event {} should be called before closing event {}"
161 .format(event_name, closing_event_name))
162
163 log_handler = _OutputHandler(desc, metric_names, output_transform,
164 event_name=event_name,
165 closing_event_name=closing_event_name)
166 super(ProgressBar, self).attach(engine, log_handler, event_name)
167 engine.add_event_handler(closing_event_name, self._close)
168
169
170 class _OutputHandler(BaseOutputHandler):
171 """Helper handler to log engine's output and/or metrics
172
173 Args:
174 description (str): progress bar description.
175 metric_names (list of str, optional): list of metric names to plot.
176 output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
177 For example, `output_transform = lambda output: output`
178 This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot
179 with corresponding keys.
180 event_name: event's name on which the progress bar advances. Valid events are from
181 :class:`~ignite.engine.Events` or any `event_name` added by
182 :meth:`~ignite.engine.Engine.register_events`.
183 closing_event_name: event's name on which the progress bar is closed. Valid events are from
184 :class:`~ignite.engine.Events` or any `event_name` added by
185 :meth:`~ignite.engine.Engine.register_events`.
186
187 """
188 def __init__(self, description, metric_names=None, output_transform=None,
189 event_name=Events.ITERATION_COMPLETED,
190 closing_event_name=Events.EPOCH_COMPLETED):
191 if metric_names is None and output_transform is None:
192 # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
193 metric_names = []
194 super(_OutputHandler, self).__init__(description, metric_names, output_transform, another_engine=None)
195 self.event_name = event_name
196 self.closing_event_name = closing_event_name
197
198 @staticmethod
199 def get_max_number_events(event_name, engine):
200 if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
201 return len(engine.state.dataloader)
202 if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
203 return engine.state.max_epochs
204 return 1
205
206 def __call__(self, engine, logger, event_name):
207
208 if logger.pbar is None:
209 logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))
210
211 desc = self.tag
212 max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
213 if max_num_of_closing_events > 1:
214 global_step = engine.state.get_event_attrib_value(self.closing_event_name)
215 desc += " [{}/{}]".format(global_step, max_num_of_closing_events)
216 logger.pbar.set_description(desc)
217
218 metrics = self._setup_output_metrics(engine)
219
220 rendered_metrics = {}
221 for key, value in metrics.items():
222 if isinstance(value, numbers.Number) or \
223 isinstance(value, torch.Tensor) and value.ndimension() == 0:
224 rendered_metrics[key] = "{:.2e}".format(value)
225 elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
226 for i, v in enumerate(value):
227 k = "{}_{}".format(key, i)
228 rendered_metrics[k] = "{:.2e}".format(v)
229 else:
230 warnings.warn("ProgressBar can not log "
231 "metrics value type {}".format(type(value)))
232
233 if rendered_metrics:
234 logger.pbar.set_postfix(**rendered_metrics)
235
236 logger.pbar.update()
237
[end of ignite/contrib/handlers/tqdm_logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -114,7 +114,8 @@
)
def _close(self, engine):
- self.pbar.close()
+ if self.pbar:
+ self.pbar.close()
self.pbar = None
@staticmethod
| {"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -114,7 +114,8 @@\n )\n \n def _close(self, engine):\n- self.pbar.close()\n+ if self.pbar:\n+ self.pbar.close()\n self.pbar = None\n \n @staticmethod\n", "issue": "Bug with ProgressBar with TerminateOnNan\nIf we attach `ProgressBar` and `TerminateOnNaN` handlers to a trainer and `TerminateOnNan` stops training on the first iteration. We have the following error:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-36-b4ac10e6ccc4> in <module>\r\n----> 1 trainer.run(train_ab_loader, max_epochs=200)\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)\r\n 357 except BaseException as e:\r\n 358 self._logger.error(\"Engine run is terminating due to exception: %s.\", str(e))\r\n--> 359 self._handle_exception(e)\r\n 360 \r\n 361 return self.state\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _handle_exception(self, e)\r\n 322 self._fire_event(Events.EXCEPTION_RAISED, e)\r\n 323 else:\r\n--> 324 raise e\r\n 325 \r\n 326 def run(self, data, max_epochs=1):\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)\r\n 350 self._fire_event(Events.EPOCH_COMPLETED)\r\n 351 \r\n--> 352 self._fire_event(Events.COMPLETED)\r\n 353 time_taken = time.time() - start_time\r\n 354 hours, mins, secs = _to_hours_mins_secs(time_taken)\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _fire_event(self, event_name, *event_args, **event_kwargs)\r\n 257 for func, args, kwargs in self._event_handlers[event_name]:\r\n 258 kwargs.update(event_kwargs)\r\n--> 259 func(self, *(event_args + args), **kwargs)\r\n 260 \r\n 261 def fire_event(self, event_name):\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/contrib/handlers/tqdm_logger.py in _close(self, engine)\r\n 115 \r\n 116 def _close(self, engine):\r\n--> 117 self.pbar.close()\r\n 118 self.pbar = None\r\n 119 \r\n\r\nAttributeError: 'NoneType' object has no attribute 'close'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport numbers\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\n\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. 
\"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. 
\"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n i1 = ProgressBar.events_order.index(event1)\n i2 = ProgressBar.events_order.index(event2)\n return i1 < i2\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list, optional): list of the metrics names to log as the bar progresses\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not (event_name in Events and closing_event_name in Events):\n raise ValueError(\"Logging and closing events should be only ignite.engine.Events\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n event_name=event_name,\n closing_event_name=closing_event_name)\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot\n with corresponding keys.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n def __init__(self, description, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform, another_engine=None)\n self.event_name = event_name\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n if logger.pbar is None:\n logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, numbers.Number) or \\\n isinstance(value, torch.Tensor) and value.ndimension() == 0:\n rendered_metrics[key] = \"{:.2e}\".format(value)\n elif isinstance(value, torch.Tensor) and value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = \"{:.2e}\".format(v)\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"metrics value type {}\".format(type(value)))\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n logger.pbar.update()\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]} | 3,975 | 117 |
gh_patches_debug_20930 | rasdani/github-patches | git_diff | wright-group__WrightTools-543 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multiplication doesn't seem to work in `d.transform`
`d.transform('w2+w2+w2', 'w1', 'd2')` works.
`d.transform('3*w2', 'w1', 'd2')` does not work (even with varying spacing around the multiplication operator).
Part of the error that is raised:
```
File "/home/darien/source/WrightTools/WrightTools/data/_data.py", line 1306, in transform
axis = current.get(expression, Axis(self, expression))
File "/home/darien/source/WrightTools/WrightTools/data/_axis.py", line 53, in __init__
self.units = self.variables[0].units
File "/home/darien/source/WrightTools/WrightTools/data/_axis.py", line 152, in variables
return self._variables
AttributeError: 'Axis' object has no attribute '_variables'
```
</issue>
<code>
[start of WrightTools/data/_axis.py]
1 """Axis class and associated."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import re
8 import numexpr
9 import operator
10 import functools
11
12 import numpy as np
13
14 from .. import exceptions as wt_exceptions
15 from .. import kit as wt_kit
16 from .. import units as wt_units
17
18
19 # --- define --------------------------------------------------------------------------------------
20
21
22 operator_to_identifier = {}
23 operator_to_identifier['/'] = '__d__'
24 operator_to_identifier['='] = '__e__'
25 operator_to_identifier['-'] = '__m__'
26 operator_to_identifier['+'] = '__p__'
27 operator_to_identifier['*'] = '__t__'
28 identifier_to_operator = {value: key for key, value in operator_to_identifier.items()}
29 operators = ''.join(operator_to_identifier.keys())
30
31
32 # --- class ---------------------------------------------------------------------------------------
33
34
35 class Axis(object):
36 """Axis class."""
37
38 def __init__(self, parent, expression, units=None):
39 """Data axis.
40
41 Parameters
42 ----------
43 parent : WrightTools.Data
44 Parent data object.
45 expression : string
46 Axis expression.
47 units : string (optional)
48 Axis units. Default is None.
49 """
50 self.parent = parent
51 self.expression = expression
52 if units is None:
53 self.units = self.variables[0].units
54 else:
55 self.units = units
56
57 def __getitem__(self, index):
58 vs = {}
59 for variable in self.variables:
60 arr = variable[index]
61 vs[variable.natural_name] = wt_units.converter(arr, variable.units, self.units)
62 return numexpr.evaluate(self.expression.split('=')[0], local_dict=vs)
63
64 def __repr__(self):
65 return '<WrightTools.Axis {0} ({1}) at {2}>'.format(self.expression, str(self.units),
66 id(self))
67
68 @property
69 def _leaf(self):
70 out = self.expression
71 if self.units is not None:
72 out += ' ({0}) {1}'.format(self.units, self.shape)
73 return out
74
75 @property
76 def full(self):
77 arr = self[:]
78 for i in range(arr.ndim):
79 if arr.shape[i] == 1:
80 arr = np.repeat(arr, self.parent.shape[i], axis=i)
81 return arr
82
83 @property
84 def identity(self):
85 return self.natural_name + ' {%s}' % self.units
86
87 @property
88 def label(self):
89 symbol = wt_units.get_symbol(self.units)
90 label = r'$\mathsf{' + self.expression
91 for v in self.variables:
92 label = label.replace(v.natural_name, '%s_{%s}' % (symbol, v.label))
93 if self.units_kind:
94 units_dictionary = getattr(wt_units, self.units_kind)
95 label += r'\,'
96 label += r'\left('
97 label += units_dictionary[self.units][2]
98 label += r'\right)'
99 else:
100 pass
101 label += r'}$'
102 return label
103
104 @property
105 def natural_name(self):
106 name = self.expression.strip()
107 for op in operators:
108 name = name.replace(op, operator_to_identifier[op])
109 return name
110
111 @property
112 def ndim(self):
113 """Get number of dimensions."""
114 try:
115 assert self._ndim is not None
116 except (AssertionError, AttributeError):
117 self._ndim = self.variables[0].ndim
118 finally:
119 return self._ndim
120
121 @property
122 def points(self):
123 """Squeezed array."""
124 return np.squeeze(self[:])
125
126 @property
127 def shape(self):
128 """Shape."""
129 return wt_kit.joint_shape(*self.variables)
130
131 @property
132 def size(self):
133 """Size."""
134 return functools.reduce(operator.mul, self.shape)
135
136 @property
137 def units_kind(self):
138 """Units kind."""
139 return wt_units.kind(self.units)
140
141 @property
142 def variables(self):
143 """Variables."""
144 try:
145 assert self._variables is not None
146 except (AssertionError, AttributeError):
147 pattern = '|'.join(map(re.escape, operators))
148 keys = re.split(pattern, self.expression)
149 indices = [self.parent.variable_names.index(key) for key in keys]
150 self._variables = [self.parent.variables[i] for i in indices]
151 finally:
152 return self._variables
153
154 def convert(self, destination_units, *, convert_variables=False):
155 """Convert axis to destination_units.
156
157 Parameters
158 ----------
159 destination_units : string
160 Destination units.
161 convert_variables : boolean (optional)
162 Toggle conversion of stored arrays. Default is False.
163 """
164 if not wt_units.is_valid_conversion(self.units, destination_units):
165 kind = wt_units.kind(self.units)
166 valid = list(wt_units.dicts[kind].keys())
167 raise wt_exceptions.UnitsError(valid, destination_units)
168 if convert_variables:
169 for v in self.variables:
170 v.convert(destination_units)
171 self.units = destination_units
172
173 def max(self):
174 """Axis max."""
175 return np.max(self[:])
176
177 def min(self):
178 """Axis min."""
179 return np.min(self[:])
180
[end of WrightTools/data/_axis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/data/_axis.py b/WrightTools/data/_axis.py
--- a/WrightTools/data/_axis.py
+++ b/WrightTools/data/_axis.py
@@ -106,7 +106,7 @@
name = self.expression.strip()
for op in operators:
name = name.replace(op, operator_to_identifier[op])
- return name
+ return wt_kit.string2identifier(name)
@property
def ndim(self):
@@ -146,7 +146,10 @@
except (AssertionError, AttributeError):
pattern = '|'.join(map(re.escape, operators))
keys = re.split(pattern, self.expression)
- indices = [self.parent.variable_names.index(key) for key in keys]
+ indices = []
+ for key in keys:
+ if key in self.parent.variable_names:
+ indices.append(self.parent.variable_names.index(key))
self._variables = [self.parent.variables[i] for i in indices]
finally:
return self._variables
| {"golden_diff": "diff --git a/WrightTools/data/_axis.py b/WrightTools/data/_axis.py\n--- a/WrightTools/data/_axis.py\n+++ b/WrightTools/data/_axis.py\n@@ -106,7 +106,7 @@\n name = self.expression.strip()\n for op in operators:\n name = name.replace(op, operator_to_identifier[op])\n- return name\n+ return wt_kit.string2identifier(name)\n \n @property\n def ndim(self):\n@@ -146,7 +146,10 @@\n except (AssertionError, AttributeError):\n pattern = '|'.join(map(re.escape, operators))\n keys = re.split(pattern, self.expression)\n- indices = [self.parent.variable_names.index(key) for key in keys]\n+ indices = []\n+ for key in keys:\n+ if key in self.parent.variable_names:\n+ indices.append(self.parent.variable_names.index(key))\n self._variables = [self.parent.variables[i] for i in indices]\n finally:\n return self._variables\n", "issue": "multiplication doesn't seem to work in d.transform\n`d.transform('w2+w2+w2', 'w1', 'd2')` works.\r\n\r\n`d.transform('3*w2', 'w1', 'd2')` does not work (even with varying spacing around the multiplication operator)\r\nPart of the error that is raised:\r\n```\r\n File \"/home/darien/source/WrightTools/WrightTools/data/_data.py\", line 1306, in transform\r\n axis = current.get(expression, Axis(self, expression))\r\n\r\n File \"/home/darien/source/WrightTools/WrightTools/data/_axis.py\", line 53, in __init__\r\n self.units = self.variables[0].units\r\n\r\n File \"/home/darien/source/WrightTools/WrightTools/data/_axis.py\", line 152, in variables\r\n return self._variables\r\n\r\nAttributeError: 'Axis' object has no attribute '_variables'\r\n```\n", "before_files": [{"content": "\"\"\"Axis class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport re\nimport numexpr\nimport operator\nimport functools\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\nfrom .. import kit as wt_kit\nfrom .. import units as wt_units\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\noperator_to_identifier = {}\noperator_to_identifier['/'] = '__d__'\noperator_to_identifier['='] = '__e__'\noperator_to_identifier['-'] = '__m__'\noperator_to_identifier['+'] = '__p__'\noperator_to_identifier['*'] = '__t__'\nidentifier_to_operator = {value: key for key, value in operator_to_identifier.items()}\noperators = ''.join(operator_to_identifier.keys())\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Axis(object):\n \"\"\"Axis class.\"\"\"\n\n def __init__(self, parent, expression, units=None):\n \"\"\"Data axis.\n\n Parameters\n ----------\n parent : WrightTools.Data\n Parent data object.\n expression : string\n Axis expression.\n units : string (optional)\n Axis units. 
Default is None.\n \"\"\"\n self.parent = parent\n self.expression = expression\n if units is None:\n self.units = self.variables[0].units\n else:\n self.units = units\n\n def __getitem__(self, index):\n vs = {}\n for variable in self.variables:\n arr = variable[index]\n vs[variable.natural_name] = wt_units.converter(arr, variable.units, self.units)\n return numexpr.evaluate(self.expression.split('=')[0], local_dict=vs)\n\n def __repr__(self):\n return '<WrightTools.Axis {0} ({1}) at {2}>'.format(self.expression, str(self.units),\n id(self))\n\n @property\n def _leaf(self):\n out = self.expression\n if self.units is not None:\n out += ' ({0}) {1}'.format(self.units, self.shape)\n return out\n\n @property\n def full(self):\n arr = self[:]\n for i in range(arr.ndim):\n if arr.shape[i] == 1:\n arr = np.repeat(arr, self.parent.shape[i], axis=i)\n return arr\n\n @property\n def identity(self):\n return self.natural_name + ' {%s}' % self.units\n\n @property\n def label(self):\n symbol = wt_units.get_symbol(self.units)\n label = r'$\\mathsf{' + self.expression\n for v in self.variables:\n label = label.replace(v.natural_name, '%s_{%s}' % (symbol, v.label))\n if self.units_kind:\n units_dictionary = getattr(wt_units, self.units_kind)\n label += r'\\,'\n label += r'\\left('\n label += units_dictionary[self.units][2]\n label += r'\\right)'\n else:\n pass\n label += r'}$'\n return label\n\n @property\n def natural_name(self):\n name = self.expression.strip()\n for op in operators:\n name = name.replace(op, operator_to_identifier[op])\n return name\n\n @property\n def ndim(self):\n \"\"\"Get number of dimensions.\"\"\"\n try:\n assert self._ndim is not None\n except (AssertionError, AttributeError):\n self._ndim = self.variables[0].ndim\n finally:\n return self._ndim\n\n @property\n def points(self):\n \"\"\"Squeezed array.\"\"\"\n return np.squeeze(self[:])\n\n @property\n def shape(self):\n \"\"\"Shape.\"\"\"\n return wt_kit.joint_shape(*self.variables)\n\n @property\n def size(self):\n \"\"\"Size.\"\"\"\n return functools.reduce(operator.mul, self.shape)\n\n @property\n def units_kind(self):\n \"\"\"Units kind.\"\"\"\n return wt_units.kind(self.units)\n\n @property\n def variables(self):\n \"\"\"Variables.\"\"\"\n try:\n assert self._variables is not None\n except (AssertionError, AttributeError):\n pattern = '|'.join(map(re.escape, operators))\n keys = re.split(pattern, self.expression)\n indices = [self.parent.variable_names.index(key) for key in keys]\n self._variables = [self.parent.variables[i] for i in indices]\n finally:\n return self._variables\n\n def convert(self, destination_units, *, convert_variables=False):\n \"\"\"Convert axis to destination_units.\n\n Parameters\n ----------\n destination_units : string\n Destination units.\n convert_variables : boolean (optional)\n Toggle conversion of stored arrays. Default is False.\n \"\"\"\n if not wt_units.is_valid_conversion(self.units, destination_units):\n kind = wt_units.kind(self.units)\n valid = list(wt_units.dicts[kind].keys())\n raise wt_exceptions.UnitsError(valid, destination_units)\n if convert_variables:\n for v in self.variables:\n v.convert(destination_units)\n self.units = destination_units\n\n def max(self):\n \"\"\"Axis max.\"\"\"\n return np.max(self[:])\n\n def min(self):\n \"\"\"Axis min.\"\"\"\n return np.min(self[:])\n", "path": "WrightTools/data/_axis.py"}]} | 2,271 | 226 |
gh_patches_debug_13642 | rasdani/github-patches | git_diff | mdn__kuma-6547 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
/ko/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype
# Request type
<!-- Select the appropriate option -->
- [ ] Please close this issue, I accidentally submitted it without adding any details
- [ ] New documentation
- [x] Correction or update
# Details
I was trying to make it redirect to the main `Array` document, but I can't edit it. (404)
It seems that if the upstream document (`/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype`) is removed, localized ones throw a 404 in `$edit`.
</issue>
<code>
[start of kuma/wiki/views/translate.py]
1 from urllib.parse import urlencode
2
3 from csp.decorators import csp_update
4 from django.conf import settings
5 from django.core.exceptions import ObjectDoesNotExist
6 from django.http import Http404, JsonResponse
7 from django.shortcuts import get_object_or_404, redirect, render
8 from django.utils.safestring import mark_safe
9 from django.utils.translation import ugettext_lazy as _
10 from django.views.decorators.cache import never_cache
11
12 import kuma.wiki.content
13 from kuma.attachments.forms import AttachmentRevisionForm
14 from kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required
15 from kuma.core.i18n import get_language_mapping
16 from kuma.core.urlresolvers import reverse
17 from kuma.core.utils import get_object_or_none, smart_int, urlparams
18
19 from .utils import document_form_initial, split_slug
20 from ..decorators import check_readonly, prevent_indexing, process_document_path
21 from ..forms import DocumentForm, RevisionForm
22 from ..models import Document, Revision
23
24
25 @ensure_wiki_domain
26 @never_cache
27 @block_user_agents
28 @login_required
29 @process_document_path
30 def select_locale(request, document_slug, document_locale):
31 """
32 Select a locale to translate the document to.
33 """
34 doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)
35 return render(request, "wiki/select_locale.html", {"document": doc})
36
37
38 @ensure_wiki_domain
39 @never_cache
40 @block_user_agents
41 @login_required
42 @csp_update(SCRIPT_SRC="'unsafe-eval'") # Required until CKEditor 4.7
43 @process_document_path
44 @check_readonly
45 @prevent_indexing
46 def translate(request, document_slug, document_locale):
47 """
48 Create a new translation of a wiki document.
49
50 * document_slug is for the default locale
51 * translation is to the request locale
52 """
53 # TODO: Refactor this view into two views? (new, edit)
54 # That might help reduce the headache-inducing branchiness.
55
56 # The parent document to translate from
57 try:
58 # Use '.all_objects' because the parent might have been soft deleted.
59 # And if we don't respect that fact, it would become impossible to
60 # edit a the child of it.
61 parent_doc = Document.all_objects.get(
62 locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug
63 )
64 except Document.DoesNotExist:
65 raise Http404("Parent document does not exist")
66
67 # Get the mapping here and now so it can be used for input validation
68 language_mapping = get_language_mapping()
69
70 # HACK: Seems weird, but sticking the translate-to locale in a query
71 # param is the best way to avoid the MindTouch-legacy locale
72 # redirection logic.
73 document_locale = request.GET.get("tolocale", document_locale)
74 if document_locale.lower() not in language_mapping:
75 # The 'tolocale' query string parameters aren't free-text. They're
76 # explicitly listed on the "Select language" page (`...$locales`)
77 # If a locale was entered that wasn't a link it's a user bug.
78 raise Http404
79
80 # Set a "Discard Changes" page
81 discard_href = ""
82
83 if settings.WIKI_DEFAULT_LANGUAGE == document_locale:
84 # Don't translate to the default language.
85 return redirect(
86 reverse(
87 "wiki.edit",
88 locale=settings.WIKI_DEFAULT_LANGUAGE,
89 args=[parent_doc.slug],
90 )
91 )
92
93 if not parent_doc.is_localizable:
94 message = _("You cannot translate this document.")
95 context = {"message": message}
96 return render(request, "handlers/400.html", context, status=400)
97
98 based_on_rev = parent_doc.current_or_latest_revision()
99
100 disclose_description = bool(request.GET.get("opendescription"))
101
102 try:
103 doc = parent_doc.translations.get(locale=document_locale)
104 slug_dict = split_slug(doc.slug)
105 except Document.DoesNotExist:
106 doc = None
107 disclose_description = True
108 slug_dict = split_slug(document_slug)
109
110 # Find the "real" parent topic, which is its translation
111 if parent_doc.parent_topic:
112 try:
113 parent_topic_translated_doc = parent_doc.parent_topic.translations.get(
114 locale=document_locale
115 )
116 slug_dict = split_slug(
117 parent_topic_translated_doc.slug + "/" + slug_dict["specific"]
118 )
119 except ObjectDoesNotExist:
120 pass
121
122 user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))
123
124 doc_form = None
125 if user_has_doc_perm:
126 if doc:
127 # If there's an existing doc, populate form from it.
128 discard_href = doc.get_absolute_url()
129 doc.slug = slug_dict["specific"]
130 doc_initial = document_form_initial(doc)
131 else:
132 # If no existing doc, bring over the original title and slug.
133 discard_href = parent_doc.get_absolute_url()
134 doc_initial = {"title": based_on_rev.title, "slug": slug_dict["specific"]}
135 doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict["parent"])
136
137 initial = {
138 "based_on": based_on_rev.id,
139 "current_rev": doc.current_or_latest_revision().id if doc else None,
140 "comment": "",
141 "toc_depth": based_on_rev.toc_depth,
142 "localization_tags": ["inprogress"],
143 }
144 content = None
145 if not doc:
146 content = based_on_rev.content
147 if content:
148 # TODO: There will be no need to "filterEditorSafety" when the code
149 # that calls "clean_content" on Revision.save is deployed to
150 # production, AND the current revisions of all docs have had
151 # their content cleaned with "clean_content".
152 initial.update(
153 content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()
154 )
155 instance = doc and doc.current_or_latest_revision()
156 rev_form = RevisionForm(
157 request=request,
158 instance=instance,
159 initial=initial,
160 parent_slug=slug_dict["parent"],
161 )
162
163 if request.method == "POST":
164 which_form = request.POST.get("form-type", "both")
165 doc_form_invalid = False
166
167 # Grab the posted slug value in case it's invalid
168 posted_slug = request.POST.get("slug", slug_dict["specific"])
169
170 if user_has_doc_perm and which_form in ["doc", "both"]:
171 disclose_description = True
172 post_data = request.POST.copy()
173
174 post_data.update({"locale": document_locale})
175
176 doc_form = DocumentForm(
177 post_data, instance=doc, parent_slug=slug_dict["parent"]
178 )
179 doc_form.instance.locale = document_locale
180 doc_form.instance.parent = parent_doc
181
182 if which_form == "both":
183 # Sending a new copy of post so the slug change above
184 # doesn't cause problems during validation
185 rev_form = RevisionForm(
186 request=request, data=post_data, parent_slug=slug_dict["parent"]
187 )
188
189 # If we are submitting the whole form, we need to check that
190 # the Revision is valid before saving the Document.
191 if doc_form.is_valid() and (which_form == "doc" or rev_form.is_valid()):
192 doc = doc_form.save(parent=parent_doc)
193
194 if which_form == "doc":
195 url = urlparams(doc.get_edit_url(), opendescription=1)
196 return redirect(url)
197 else:
198 doc_form.data["slug"] = posted_slug
199 doc_form_invalid = True
200
201 if doc and which_form in ["rev", "both"]:
202 post_data = request.POST.copy()
203 if "slug" not in post_data:
204 post_data["slug"] = posted_slug
205
206 # update the post data with the toc_depth of original
207 post_data["toc_depth"] = based_on_rev.toc_depth
208
209 # Pass in the locale for the akistmet "blog_lang".
210 post_data["locale"] = document_locale
211
212 rev_form = RevisionForm(
213 request=request, data=post_data, parent_slug=slug_dict["parent"]
214 )
215 rev_form.instance.document = doc # for rev_form.clean()
216
217 if rev_form.is_valid() and not doc_form_invalid:
218 parent_id = request.POST.get("parent_id", "")
219
220 # Attempt to set a parent
221 if parent_id:
222 try:
223 parent_doc = get_object_or_404(Document, id=parent_id)
224 rev_form.instance.document.parent = parent_doc
225 doc.parent = parent_doc
226 rev_form.instance.based_on.document = doc.original
227 except Document.DoesNotExist:
228 pass
229
230 rev_form.save(doc)
231 # If this is an Ajax POST, then return a JsonResponse
232 if request.is_ajax():
233 data = {
234 "error": False,
235 "new_revision_id": rev_form.instance.id,
236 }
237
238 return JsonResponse(data)
239
240 # Construct the redirect URL, adding any needed parameters
241 url = doc.get_absolute_url()
242 params = {}
243 # Parameter for the document saved, so that we can delete the cached draft on load
244 params["rev_saved"] = request.POST.get("current_rev", "")
245 url = "%s?%s" % (url, urlencode(params))
246 return redirect(url)
247 else:
248 # If this is an Ajax POST, then return a JsonResponse with error
249 if request.is_ajax():
250 if "current_rev" in rev_form._errors:
251 # Make the error message safe so the '<' and '>' don't
252 # get turned into '<' and '>', respectively
253 rev_form.errors["current_rev"][0] = mark_safe(
254 rev_form.errors["current_rev"][0]
255 )
256 errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]
257 data = {
258 "error": True,
259 "error_message": errors,
260 "new_revision_id": rev_form.instance.id,
261 }
262 return JsonResponse(data=data)
263
264 if doc:
265 from_id = smart_int(request.GET.get("from"), None)
266 to_id = smart_int(request.GET.get("to"), None)
267
268 revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent)
269 revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)
270 else:
271 revision_from = revision_to = None
272
273 parent_split = split_slug(parent_doc.slug)
274
275 language = language_mapping[document_locale.lower()]
276 default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]
277
278 context = {
279 "parent": parent_doc,
280 "document": doc,
281 "document_form": doc_form,
282 "revision_form": rev_form,
283 "locale": document_locale,
284 "default_locale": default_locale,
285 "language": language,
286 "based_on": based_on_rev,
287 "disclose_description": disclose_description,
288 "discard_href": discard_href,
289 "attachment_form": AttachmentRevisionForm(),
290 "specific_slug": parent_split["specific"],
291 "parent_slug": parent_split["parent"],
292 "revision_from": revision_from,
293 "revision_to": revision_to,
294 }
295 return render(request, "wiki/translate.html", context)
296
[end of kuma/wiki/views/translate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py
--- a/kuma/wiki/views/translate.py
+++ b/kuma/wiki/views/translate.py
@@ -220,7 +220,10 @@
# Attempt to set a parent
if parent_id:
try:
- parent_doc = get_object_or_404(Document, id=parent_id)
+ try:
+ parent_doc = Document.all_objects.get(id=parent_id)
+ except Document.DoesNotExist:
+ raise Http404("Parent document does not exist")
rev_form.instance.document.parent = parent_doc
doc.parent = parent_doc
rev_form.instance.based_on.document = doc.original
| {"golden_diff": "diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py\n--- a/kuma/wiki/views/translate.py\n+++ b/kuma/wiki/views/translate.py\n@@ -220,7 +220,10 @@\n # Attempt to set a parent\n if parent_id:\n try:\n- parent_doc = get_object_or_404(Document, id=parent_id)\n+ try:\n+ parent_doc = Document.all_objects.get(id=parent_id)\n+ except Document.DoesNotExist:\n+ raise Http404(\"Parent document does not exist\")\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n", "issue": "/ko/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype\n# Request type\r\n<!-- Select the appropriate option -->\r\n- [ ] Please close this issue, I accidentally submitted it without adding any details\r\n- [ ] New documentation\r\n- [x] Correction or update\r\n\r\n\r\n# Details\r\nI was trying to make it redirect to the main `Array` document, but I can't edit it. (404)\r\nSeems like that if the upstream document (`/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype`) is removed, localized ones throw 404 in `$edit`.\n", "before_files": [{"content": "from urllib.parse import urlencode\n\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.cache import never_cache\n\nimport kuma.wiki.content\nfrom kuma.attachments.forms import AttachmentRevisionForm\nfrom kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required\nfrom kuma.core.i18n import get_language_mapping\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import get_object_or_none, smart_int, urlparams\n\nfrom .utils import document_form_initial, split_slug\nfrom ..decorators import check_readonly, prevent_indexing, process_document_path\nfrom ..forms import DocumentForm, RevisionForm\nfrom ..models import Document, Revision\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@process_document_path\ndef select_locale(request, document_slug, document_locale):\n \"\"\"\n Select a locale to translate the document to.\n \"\"\"\n doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n return render(request, \"wiki/select_locale.html\", {\"document\": doc})\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@csp_update(SCRIPT_SRC=\"'unsafe-eval'\") # Required until CKEditor 4.7\n@process_document_path\n@check_readonly\n@prevent_indexing\ndef translate(request, document_slug, document_locale):\n \"\"\"\n Create a new translation of a wiki document.\n\n * document_slug is for the default locale\n * translation is to the request locale\n \"\"\"\n # TODO: Refactor this view into two views? 
(new, edit)\n # That might help reduce the headache-inducing branchiness.\n\n # The parent document to translate from\n try:\n # Use '.all_objects' because the parent might have been soft deleted.\n # And if we don't respect that fact, it would become impossible to\n # edit a the child of it.\n parent_doc = Document.all_objects.get(\n locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug\n )\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n\n # Get the mapping here and now so it can be used for input validation\n language_mapping = get_language_mapping()\n\n # HACK: Seems weird, but sticking the translate-to locale in a query\n # param is the best way to avoid the MindTouch-legacy locale\n # redirection logic.\n document_locale = request.GET.get(\"tolocale\", document_locale)\n if document_locale.lower() not in language_mapping:\n # The 'tolocale' query string parameters aren't free-text. They're\n # explicitly listed on the \"Select language\" page (`...$locales`)\n # If a locale was entered that wasn't a link it's a user bug.\n raise Http404\n\n # Set a \"Discard Changes\" page\n discard_href = \"\"\n\n if settings.WIKI_DEFAULT_LANGUAGE == document_locale:\n # Don't translate to the default language.\n return redirect(\n reverse(\n \"wiki.edit\",\n locale=settings.WIKI_DEFAULT_LANGUAGE,\n args=[parent_doc.slug],\n )\n )\n\n if not parent_doc.is_localizable:\n message = _(\"You cannot translate this document.\")\n context = {\"message\": message}\n return render(request, \"handlers/400.html\", context, status=400)\n\n based_on_rev = parent_doc.current_or_latest_revision()\n\n disclose_description = bool(request.GET.get(\"opendescription\"))\n\n try:\n doc = parent_doc.translations.get(locale=document_locale)\n slug_dict = split_slug(doc.slug)\n except Document.DoesNotExist:\n doc = None\n disclose_description = True\n slug_dict = split_slug(document_slug)\n\n # Find the \"real\" parent topic, which is its translation\n if parent_doc.parent_topic:\n try:\n parent_topic_translated_doc = parent_doc.parent_topic.translations.get(\n locale=document_locale\n )\n slug_dict = split_slug(\n parent_topic_translated_doc.slug + \"/\" + slug_dict[\"specific\"]\n )\n except ObjectDoesNotExist:\n pass\n\n user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))\n\n doc_form = None\n if user_has_doc_perm:\n if doc:\n # If there's an existing doc, populate form from it.\n discard_href = doc.get_absolute_url()\n doc.slug = slug_dict[\"specific\"]\n doc_initial = document_form_initial(doc)\n else:\n # If no existing doc, bring over the original title and slug.\n discard_href = parent_doc.get_absolute_url()\n doc_initial = {\"title\": based_on_rev.title, \"slug\": slug_dict[\"specific\"]}\n doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict[\"parent\"])\n\n initial = {\n \"based_on\": based_on_rev.id,\n \"current_rev\": doc.current_or_latest_revision().id if doc else None,\n \"comment\": \"\",\n \"toc_depth\": based_on_rev.toc_depth,\n \"localization_tags\": [\"inprogress\"],\n }\n content = None\n if not doc:\n content = based_on_rev.content\n if content:\n # TODO: There will be no need to \"filterEditorSafety\" when the code\n # that calls \"clean_content\" on Revision.save is deployed to\n # production, AND the current revisions of all docs have had\n # their content cleaned with \"clean_content\".\n initial.update(\n content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()\n )\n instance = doc and 
doc.current_or_latest_revision()\n rev_form = RevisionForm(\n request=request,\n instance=instance,\n initial=initial,\n parent_slug=slug_dict[\"parent\"],\n )\n\n if request.method == \"POST\":\n which_form = request.POST.get(\"form-type\", \"both\")\n doc_form_invalid = False\n\n # Grab the posted slug value in case it's invalid\n posted_slug = request.POST.get(\"slug\", slug_dict[\"specific\"])\n\n if user_has_doc_perm and which_form in [\"doc\", \"both\"]:\n disclose_description = True\n post_data = request.POST.copy()\n\n post_data.update({\"locale\": document_locale})\n\n doc_form = DocumentForm(\n post_data, instance=doc, parent_slug=slug_dict[\"parent\"]\n )\n doc_form.instance.locale = document_locale\n doc_form.instance.parent = parent_doc\n\n if which_form == \"both\":\n # Sending a new copy of post so the slug change above\n # doesn't cause problems during validation\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n\n # If we are submitting the whole form, we need to check that\n # the Revision is valid before saving the Document.\n if doc_form.is_valid() and (which_form == \"doc\" or rev_form.is_valid()):\n doc = doc_form.save(parent=parent_doc)\n\n if which_form == \"doc\":\n url = urlparams(doc.get_edit_url(), opendescription=1)\n return redirect(url)\n else:\n doc_form.data[\"slug\"] = posted_slug\n doc_form_invalid = True\n\n if doc and which_form in [\"rev\", \"both\"]:\n post_data = request.POST.copy()\n if \"slug\" not in post_data:\n post_data[\"slug\"] = posted_slug\n\n # update the post data with the toc_depth of original\n post_data[\"toc_depth\"] = based_on_rev.toc_depth\n\n # Pass in the locale for the akistmet \"blog_lang\".\n post_data[\"locale\"] = document_locale\n\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid() and not doc_form_invalid:\n parent_id = request.POST.get(\"parent_id\", \"\")\n\n # Attempt to set a parent\n if parent_id:\n try:\n parent_doc = get_object_or_404(Document, id=parent_id)\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n except Document.DoesNotExist:\n pass\n\n rev_form.save(doc)\n # If this is an Ajax POST, then return a JsonResponse\n if request.is_ajax():\n data = {\n \"error\": False,\n \"new_revision_id\": rev_form.instance.id,\n }\n\n return JsonResponse(data)\n\n # Construct the redirect URL, adding any needed parameters\n url = doc.get_absolute_url()\n params = {}\n # Parameter for the document saved, so that we can delete the cached draft on load\n params[\"rev_saved\"] = request.POST.get(\"current_rev\", \"\")\n url = \"%s?%s\" % (url, urlencode(params))\n return redirect(url)\n else:\n # If this is an Ajax POST, then return a JsonResponse with error\n if request.is_ajax():\n if \"current_rev\" in rev_form._errors:\n # Make the error message safe so the '<' and '>' don't\n # get turned into '<' and '>', respectively\n rev_form.errors[\"current_rev\"][0] = mark_safe(\n rev_form.errors[\"current_rev\"][0]\n )\n errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]\n data = {\n \"error\": True,\n \"error_message\": errors,\n \"new_revision_id\": rev_form.instance.id,\n }\n return JsonResponse(data=data)\n\n if doc:\n from_id = smart_int(request.GET.get(\"from\"), None)\n to_id = smart_int(request.GET.get(\"to\"), None)\n\n revision_from = 
get_object_or_none(Revision, pk=from_id, document=doc.parent)\n revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)\n else:\n revision_from = revision_to = None\n\n parent_split = split_slug(parent_doc.slug)\n\n language = language_mapping[document_locale.lower()]\n default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]\n\n context = {\n \"parent\": parent_doc,\n \"document\": doc,\n \"document_form\": doc_form,\n \"revision_form\": rev_form,\n \"locale\": document_locale,\n \"default_locale\": default_locale,\n \"language\": language,\n \"based_on\": based_on_rev,\n \"disclose_description\": disclose_description,\n \"discard_href\": discard_href,\n \"attachment_form\": AttachmentRevisionForm(),\n \"specific_slug\": parent_split[\"specific\"],\n \"parent_slug\": parent_split[\"parent\"],\n \"revision_from\": revision_from,\n \"revision_to\": revision_to,\n }\n return render(request, \"wiki/translate.html\", context)\n", "path": "kuma/wiki/views/translate.py"}]} | 3,861 | 157 |
gh_patches_debug_25133 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3634 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reset a multi-build update that failed side-tag merging back to pending
If a multi-build update fails to be merged into stable, the approve-testing cron job will add a new comment to the update every time it runs.
To avoid this we need to reset the update status to pending, but we also need to be able to move the update back to testing when it is edited (a build added or removed).
</issue>
<code>
[start of bodhi/server/tasks/updates.py]
1 # Copyright 2015-2019 Red Hat Inc., and others.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The "updates handler".
20
21 This module is responsible for doing value-added work "offline" that used to be
22 done when updates were submitted. Specifically, when someone submits an update
23 we used to:
24
25 - Update any bugs in bugzilla associated with the update.
26 - Check for test cases in the wiki.
27
28 Those things could sometimes take a *very* long time, especially if there were
29 lots of builds and lots of bugs in the update.
30
31 Now, update-submission breezes by those steps and simply tells the user "OK".
32 A message gets published when their update goes through, and *that* message
33 gets received here and triggers us to do all that network-laden heavy lifting.
34 """
35
36 import logging
37 import time
38
39 from bodhi.server import util, bugs as bug_module
40 from bodhi.server.config import config
41 from bodhi.server.exceptions import BodhiException
42 from bodhi.server.models import Bug, Update, UpdateType
43
44
45 log = logging.getLogger('bodhi')
46
47
48 class UpdatesHandler:
49 """
50 Perform background tasks when updates are created or edited.
51
52 This Celery task is run when an update is created or editied in the frontend,
53 and performs background tasks such as modifying Bugzilla issues (and loading information from
54 Bugzilla so we can display it to the user) and looking up wiki test cases.
55
56 Attributes:
57 db_factory (bodhi.server.util.TransactionalSessionMaker): A context manager that yields a
58 database session.
59 handle_bugs (bool): If True, interact with Bugzilla. Else do not.
60 """
61
62 def __init__(self, *args, **kwargs):
63 """Initialize the UpdatesHandler."""
64 self.db_factory = util.transactional_session_maker()
65
66 self.handle_bugs = bool(config.get('bodhi_email'))
67 if not self.handle_bugs:
68 log.warning("No bodhi_email defined; not fetching bug details")
69
70 def run(self, api_version: int, data: dict):
71 """
72 Process the given message, updating relevant bugs and test cases.
73
74 Duplicate messages: if the server delivers the message multiple times,
75 the bugs and test cases are simply re-fetched and updated, so nothing
76 bad happens.
77
78 Args:
79 api_version: API version number.
80 data: Information about a new or edited update.
81 """
82 action = data["action"]
83 alias = data['update'].get('alias')
84
85 log.info("Updates Handler handling %s, %s" % (alias, action))
86
87 # Go to sleep for a second to try and avoid a race condition
88 # https://github.com/fedora-infra/bodhi/issues/458
89 time.sleep(1)
90
91 with self.db_factory() as session:
92 update = Update.get(alias)
93 if not update:
94 raise BodhiException("Couldn't find alias '%s' in DB" % alias)
95
96 bugs = []
97 if action == "edit":
98 for idx in data['new_bugs']:
99 bug = Bug.get(idx)
100
101 # Sanity check
102 if bug is None or bug not in update.bugs:
103 update_bugs_ids = [b.bug_id for b in update.bugs]
104 update.update_bugs(update_bugs_ids + [idx], session)
105
106 # Now, after update.update_bugs, bug with idx should exists in DB
107 bug = Bug.get(idx)
108
109 bugs.append(bug)
110
111 elif action == "testing":
112 bugs = update.bugs
113 else:
114 raise NotImplementedError("Should never get here.")
115
116 self.work_on_bugs(session, update, bugs)
117 self.fetch_test_cases(session, update)
118
119 if config['test_gating.required']:
120 with self.db_factory() as session:
121 update = Update.get(alias)
122 update.update_test_gating_status()
123
124 log.info("Updates Handler done with %s, %s" % (alias, action))
125
126 def fetch_test_cases(self, session, update):
127 """
128 Query the wiki for test cases for each package on the given update.
129
130 Args:
131 session (sqlalchemy.orm.session.Session): A database session.
132 update (bodhi.server.models.Update): The update's builds are iterated upon to find test
133 cases for their associated Packages..
134 """
135 for build in update.builds:
136 try:
137 build.package.fetch_test_cases(session)
138 except BodhiException:
139 log.warning('Error occurred during fetching testcases', exc_info=True)
140
141 def work_on_bugs(self, session, update, bugs):
142 """
143 Iterate the list of bugs, retrieving information from Bugzilla and modifying them.
144
145 Iterate the given list of bugs associated with the given update. For each bug, retrieve
146 details from Bugzilla, comment on the bug to let watchers know about the update, and mark
147 the bug as MODIFIED. If the bug is a security issue, mark the update as a security update.
148
149 If handle_bugs is not True, return and do nothing.
150
151 Args:
152 session (sqlalchemy.orm.session.Session): A database session.
153 update (bodhi.server.models.Update): The update that the bugs are associated with.
154 bugs (list): A list of bodhi.server.models.Bug instances that we wish to act on.
155 """
156 if not self.handle_bugs:
157 log.warning("Not configured to handle bugs")
158 return
159
160 log.info("Got %i bugs to sync for %r" % (len(bugs), update.alias))
161 for bug in bugs:
162 log.info("Getting RHBZ bug %r" % bug.bug_id)
163 try:
164 rhbz_bug = bug_module.bugtracker.getbug(bug.bug_id)
165
166 log.info("Updating our details for %r" % bug.bug_id)
167 bug.update_details(rhbz_bug)
168 log.info(" Got title %r for %r" % (bug.title, bug.bug_id))
169
170 # If you set the type of your update to 'enhancement' but you
171 # attach a security bug, we automatically change the type of your
172 # update to 'security'. We need to do this first, so we don't
173 # accidentally comment on stuff that we shouldn't.
174 if bug.security:
175 log.info("Setting our UpdateType to security.")
176 update.type = UpdateType.security
177
178 log.info("Commenting on %r" % bug.bug_id)
179 comment = config['initial_bug_msg'] % (
180 update.alias, update.release.long_name, update.abs_url())
181
182 log.info("Modifying %r" % bug.bug_id)
183 bug.modified(update, comment)
184 except Exception:
185 log.warning('Error occurred during updating single bug', exc_info=True)
186
[end of bodhi/server/tasks/updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bodhi/server/tasks/updates.py b/bodhi/server/tasks/updates.py
--- a/bodhi/server/tasks/updates.py
+++ b/bodhi/server/tasks/updates.py
@@ -36,10 +36,12 @@
import logging
import time
+from sqlalchemy import func
+
from bodhi.server import util, bugs as bug_module
from bodhi.server.config import config
from bodhi.server.exceptions import BodhiException
-from bodhi.server.models import Bug, Update, UpdateType
+from bodhi.server.models import Bug, Update, UpdateType, UpdateStatus
log = logging.getLogger('bodhi')
@@ -95,6 +97,19 @@
bugs = []
if action == "edit":
+ # If editing a Pending update, all of whose builds are signed, for a release
+ # which isn't composed by Bodhi (i.e. Rawhide), move it directly to Testing.
+ if not update.release.composed_by_bodhi \
+ and update.status == UpdateStatus.pending \
+ and update.signed:
+ log.info("Every build in the update is signed, set status to testing")
+
+ update.status = UpdateStatus.testing
+ update.date_testing = func.current_timestamp()
+ update.request = None
+
+ log.info(f"Update status of {update.display_name} has been set to testing")
+
for idx in data['new_bugs']:
bug = Bug.get(idx)
| {"golden_diff": "diff --git a/bodhi/server/tasks/updates.py b/bodhi/server/tasks/updates.py\n--- a/bodhi/server/tasks/updates.py\n+++ b/bodhi/server/tasks/updates.py\n@@ -36,10 +36,12 @@\n import logging\n import time\n \n+from sqlalchemy import func\n+\n from bodhi.server import util, bugs as bug_module\n from bodhi.server.config import config\n from bodhi.server.exceptions import BodhiException\n-from bodhi.server.models import Bug, Update, UpdateType\n+from bodhi.server.models import Bug, Update, UpdateType, UpdateStatus\n \n \n log = logging.getLogger('bodhi')\n@@ -95,6 +97,19 @@\n \n bugs = []\n if action == \"edit\":\n+ # If editing a Pending update, all of whose builds are signed, for a release\n+ # which isn't composed by Bodhi (i.e. Rawhide), move it directly to Testing.\n+ if not update.release.composed_by_bodhi \\\n+ and update.status == UpdateStatus.pending \\\n+ and update.signed:\n+ log.info(\"Every build in the update is signed, set status to testing\")\n+\n+ update.status = UpdateStatus.testing\n+ update.date_testing = func.current_timestamp()\n+ update.request = None\n+\n+ log.info(f\"Update status of {update.display_name} has been set to testing\")\n+\n for idx in data['new_bugs']:\n bug = Bug.get(idx)\n", "issue": "Reset multi builds update that failed merging a side tag to pending\nIf a multi builds update fails to be merge in stable, the approve testing cron job will add a new comment to the update every times it run.\r\nTo avoid this we need to reset the update status to pending, but we also need to be able to move back the update to testing when the update is edited (build added or build removed).\n", "before_files": [{"content": "# Copyright 2015-2019 Red Hat Inc., and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"updates handler\".\n\nThis module is responsible for doing value-added work \"offline\" that used to be\ndone when updates were submitted. 
Specifically, when someone submits an update\nwe used to:\n\n- Update any bugs in bugzilla associated with the update.\n- Check for test cases in the wiki.\n\nThose things could sometimes take a *very* long time, especially if there were\nlots of builds and lots of bugs in the update.\n\nNow, update-submission breezes by those steps and simply tells the user \"OK\".\nA message gets published when their update goes through, and *that* message\ngets received here and triggers us to do all that network-laden heavy lifting.\n\"\"\"\n\nimport logging\nimport time\n\nfrom bodhi.server import util, bugs as bug_module\nfrom bodhi.server.config import config\nfrom bodhi.server.exceptions import BodhiException\nfrom bodhi.server.models import Bug, Update, UpdateType\n\n\nlog = logging.getLogger('bodhi')\n\n\nclass UpdatesHandler:\n \"\"\"\n Perform background tasks when updates are created or edited.\n\n This Celery task is run when an update is created or editied in the frontend,\n and performs background tasks such as modifying Bugzilla issues (and loading information from\n Bugzilla so we can display it to the user) and looking up wiki test cases.\n\n Attributes:\n db_factory (bodhi.server.util.TransactionalSessionMaker): A context manager that yields a\n database session.\n handle_bugs (bool): If True, interact with Bugzilla. Else do not.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the UpdatesHandler.\"\"\"\n self.db_factory = util.transactional_session_maker()\n\n self.handle_bugs = bool(config.get('bodhi_email'))\n if not self.handle_bugs:\n log.warning(\"No bodhi_email defined; not fetching bug details\")\n\n def run(self, api_version: int, data: dict):\n \"\"\"\n Process the given message, updating relevant bugs and test cases.\n\n Duplicate messages: if the server delivers the message multiple times,\n the bugs and test cases are simply re-fetched and updated, so nothing\n bad happens.\n\n Args:\n api_version: API version number.\n data: Information about a new or edited update.\n \"\"\"\n action = data[\"action\"]\n alias = data['update'].get('alias')\n\n log.info(\"Updates Handler handling %s, %s\" % (alias, action))\n\n # Go to sleep for a second to try and avoid a race condition\n # https://github.com/fedora-infra/bodhi/issues/458\n time.sleep(1)\n\n with self.db_factory() as session:\n update = Update.get(alias)\n if not update:\n raise BodhiException(\"Couldn't find alias '%s' in DB\" % alias)\n\n bugs = []\n if action == \"edit\":\n for idx in data['new_bugs']:\n bug = Bug.get(idx)\n\n # Sanity check\n if bug is None or bug not in update.bugs:\n update_bugs_ids = [b.bug_id for b in update.bugs]\n update.update_bugs(update_bugs_ids + [idx], session)\n\n # Now, after update.update_bugs, bug with idx should exists in DB\n bug = Bug.get(idx)\n\n bugs.append(bug)\n\n elif action == \"testing\":\n bugs = update.bugs\n else:\n raise NotImplementedError(\"Should never get here.\")\n\n self.work_on_bugs(session, update, bugs)\n self.fetch_test_cases(session, update)\n\n if config['test_gating.required']:\n with self.db_factory() as session:\n update = Update.get(alias)\n update.update_test_gating_status()\n\n log.info(\"Updates Handler done with %s, %s\" % (alias, action))\n\n def fetch_test_cases(self, session, update):\n \"\"\"\n Query the wiki for test cases for each package on the given update.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update's builds are iterated upon to find test\n cases 
for their associated Packages..\n \"\"\"\n for build in update.builds:\n try:\n build.package.fetch_test_cases(session)\n except BodhiException:\n log.warning('Error occurred during fetching testcases', exc_info=True)\n\n def work_on_bugs(self, session, update, bugs):\n \"\"\"\n Iterate the list of bugs, retrieving information from Bugzilla and modifying them.\n\n Iterate the given list of bugs associated with the given update. For each bug, retrieve\n details from Bugzilla, comment on the bug to let watchers know about the update, and mark\n the bug as MODIFIED. If the bug is a security issue, mark the update as a security update.\n\n If handle_bugs is not True, return and do nothing.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update that the bugs are associated with.\n bugs (list): A list of bodhi.server.models.Bug instances that we wish to act on.\n \"\"\"\n if not self.handle_bugs:\n log.warning(\"Not configured to handle bugs\")\n return\n\n log.info(\"Got %i bugs to sync for %r\" % (len(bugs), update.alias))\n for bug in bugs:\n log.info(\"Getting RHBZ bug %r\" % bug.bug_id)\n try:\n rhbz_bug = bug_module.bugtracker.getbug(bug.bug_id)\n\n log.info(\"Updating our details for %r\" % bug.bug_id)\n bug.update_details(rhbz_bug)\n log.info(\" Got title %r for %r\" % (bug.title, bug.bug_id))\n\n # If you set the type of your update to 'enhancement' but you\n # attach a security bug, we automatically change the type of your\n # update to 'security'. We need to do this first, so we don't\n # accidentally comment on stuff that we shouldn't.\n if bug.security:\n log.info(\"Setting our UpdateType to security.\")\n update.type = UpdateType.security\n\n log.info(\"Commenting on %r\" % bug.bug_id)\n comment = config['initial_bug_msg'] % (\n update.alias, update.release.long_name, update.abs_url())\n\n log.info(\"Modifying %r\" % bug.bug_id)\n bug.modified(update, comment)\n except Exception:\n log.warning('Error occurred during updating single bug', exc_info=True)\n", "path": "bodhi/server/tasks/updates.py"}]} | 2,703 | 323 |
gh_patches_debug_35730 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom robots.txt support?
We've talked about blowing away the protected designation, so I'm not sure it makes sense to put a special case on the protected privacy level, but maybe a separate option for docs that shouldn't be crawled?
</issue>
<code>
[start of readthedocs/core/urls/subdomain.py]
1 """URL configurations for subdomains."""
2 from __future__ import absolute_import
3
4 from functools import reduce
5 from operator import add
6
7 from django.conf.urls import url
8 from django.conf import settings
9 from django.conf.urls.static import static
10
11 from readthedocs.core.views.serve import (
12 redirect_page_with_filename,
13 redirect_project_slug, serve_docs
14 )
15 from readthedocs.core.views import (
16 server_error_500,
17 server_error_404,
18 )
19 from readthedocs.constants import pattern_opts
20
21 handler500 = server_error_500
22 handler404 = server_error_404
23
24 subdomain_urls = [
25 url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'
26 r'page/(?P<filename>.*)$'.format(**pattern_opts),
27 redirect_page_with_filename,
28 name='docs_detail'),
29
30 url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)$').format(**pattern_opts),
31 redirect_project_slug,
32 name='redirect_project_slug'),
33
34 url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'
35 r'(?P<lang_slug>{lang_slug})/'
36 r'(?P<version_slug>{version_slug})/'
37 r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
38 serve_docs,
39 name='docs_detail'),
40 ]
41
42 groups = [subdomain_urls]
43
44 # Needed to serve media locally
45 if getattr(settings, 'DEBUG', False):
46 groups.insert(0, static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT))
47
48 urlpatterns = reduce(add, groups)
49
[end of readthedocs/core/urls/subdomain.py]
[start of readthedocs/core/views/serve.py]
1 # -*- coding: utf-8 -*-
2 """
3 Doc serving from Python.
4
5 In production there are two modes,
6 * Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)
7 * Serving from private symlinks in Python (readthedocs.com only)
8
9 In development, we have two modes:
10 * Serving from public symlinks in Python
11 * Serving from private symlinks in Python
12
13 This means we should only serve from public symlinks in dev,
14 and generally default to serving from private symlinks in Python only.
15
16 Privacy
17 -------
18
19 These views will take into account the version privacy level.
20
21 Settings
22 --------
23
24 PYTHON_MEDIA (False) - Set this to True to serve docs & media from Python
25 SERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.
26 """
27
28 from __future__ import (
29 absolute_import, division, print_function, unicode_literals)
30
31 import logging
32 import mimetypes
33 import os
34 from functools import wraps
35
36 from django.conf import settings
37 from django.http import HttpResponse, HttpResponseRedirect, Http404
38 from django.shortcuts import get_object_or_404
39 from django.shortcuts import render
40 from django.utils.encoding import iri_to_uri
41 from django.views.static import serve
42
43 from readthedocs.builds.models import Version
44 from readthedocs.core.permissions import AdminPermission
45 from readthedocs.core.resolver import resolve, resolve_path
46 from readthedocs.core.symlink import PrivateSymlink, PublicSymlink
47 from readthedocs.projects import constants
48 from readthedocs.projects.models import Project, ProjectRelationship
49
50 log = logging.getLogger(__name__)
51
52
53 def map_subproject_slug(view_func):
54 """
55 A decorator that maps a ``subproject_slug`` URL param into a Project.
56
57 :raises: Http404 if the Project doesn't exist
58
59 .. warning:: Does not take into account any kind of privacy settings.
60 """
61 @wraps(view_func)
62 def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa
63 if subproject is None and subproject_slug:
64 # Try to fetch by subproject alias first, otherwise we might end up
65 # redirected to an unrelated project.
66 try:
67 # Depends on a project passed into kwargs
68 rel = ProjectRelationship.objects.get(
69 parent=kwargs['project'],
70 alias=subproject_slug,
71 )
72 subproject = rel.child
73 except (ProjectRelationship.DoesNotExist, KeyError):
74 subproject = get_object_or_404(Project, slug=subproject_slug)
75 return view_func(request, subproject=subproject, *args, **kwargs)
76
77 return inner_view
78
79
80 def map_project_slug(view_func):
81 """
82 A decorator that maps a ``project_slug`` URL param into a Project.
83
84 :raises: Http404 if the Project doesn't exist
85
86 .. warning:: Does not take into account any kind of privacy settings.
87 """
88 @wraps(view_func)
89 def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa
90 if project is None:
91 if not project_slug:
92 project_slug = request.slug
93 try:
94 project = Project.objects.get(slug=project_slug)
95 except Project.DoesNotExist:
96 raise Http404('Project does not exist.')
97 return view_func(request, project=project, *args, **kwargs)
98
99 return inner_view
100
101
102 @map_project_slug
103 @map_subproject_slug
104 def redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument
105 """Handle / -> /en/latest/ directs on subdomains."""
106 return HttpResponseRedirect(resolve(subproject or project))
107
108
109 @map_project_slug
110 @map_subproject_slug
111 def redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa
112 """Redirect /page/file.html to /en/latest/file.html."""
113 return HttpResponseRedirect(
114 resolve(subproject or project, filename=filename))
115
116
117 def _serve_401(request, project):
118 res = render(request, '401.html')
119 res.status_code = 401
120 log.debug('Unauthorized access to {0} documentation'.format(project.slug))
121 return res
122
123
124 def _serve_file(request, filename, basepath):
125 # Serve the file from the proper location
126 if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):
127 # Serve from Python
128 return serve(request, filename, basepath)
129
130 # Serve from Nginx
131 content_type, encoding = mimetypes.guess_type(
132 os.path.join(basepath, filename))
133 content_type = content_type or 'application/octet-stream'
134 response = HttpResponse(content_type=content_type)
135 if encoding:
136 response['Content-Encoding'] = encoding
137 try:
138 iri_path = os.path.join(
139 basepath[len(settings.SITE_ROOT):],
140 filename,
141 )
142 # NGINX does not support non-ASCII characters in the header, so we
143 # convert the IRI path to URI so it's compatible with what NGINX expects
144 # as the header value.
145 # https://github.com/benoitc/gunicorn/issues/1448
146 # https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling
147 x_accel_redirect = iri_to_uri(iri_path)
148 response['X-Accel-Redirect'] = x_accel_redirect
149 except UnicodeEncodeError:
150 raise Http404
151
152 return response
153
154
155 @map_project_slug
156 @map_subproject_slug
157 def serve_docs(
158 request, project, subproject, lang_slug=None, version_slug=None,
159 filename=''):
160 """Exists to map existing proj, lang, version, filename views to the file format."""
161 if not version_slug:
162 version_slug = project.get_default_version()
163 try:
164 version = project.versions.public(request.user).get(slug=version_slug)
165 except Version.DoesNotExist:
166 # Properly raise a 404 if the version doesn't exist (or is inactive) and
167 # a 401 if it does
168 if project.versions.filter(slug=version_slug, active=True).exists():
169 return _serve_401(request, project)
170 raise Http404('Version does not exist.')
171 filename = resolve_path(
172 subproject or project, # Resolve the subproject if it exists
173 version_slug=version_slug,
174 language=lang_slug,
175 filename=filename,
176 subdomain=True, # subdomain will make it a "full" path without a URL prefix
177 )
178 if (version.privacy_level == constants.PRIVATE and
179 not AdminPermission.is_member(user=request.user, obj=project)):
180 return _serve_401(request, project)
181 return _serve_symlink_docs(
182 request,
183 filename=filename,
184 project=project,
185 privacy_level=version.privacy_level,
186 )
187
188
189 @map_project_slug
190 def _serve_symlink_docs(request, project, privacy_level, filename=''):
191 """Serve a file by symlink, or a 404 if not found."""
192 # Handle indexes
193 if filename == '' or filename[-1] == '/':
194 filename += 'index.html'
195
196 # This breaks path joining, by ignoring the root when given an "absolute" path
197 if filename[0] == '/':
198 filename = filename[1:]
199
200 log.info('Serving %s for %s', filename, project)
201
202 files_tried = []
203
204 serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])
205
206 if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa
207 public_symlink = PublicSymlink(project)
208 basepath = public_symlink.project_root
209 if os.path.exists(os.path.join(basepath, filename)):
210 return _serve_file(request, filename, basepath)
211
212 files_tried.append(os.path.join(basepath, filename))
213
214 if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa
215 # Handle private
216 private_symlink = PrivateSymlink(project)
217 basepath = private_symlink.project_root
218
219 if os.path.exists(os.path.join(basepath, filename)):
220 return _serve_file(request, filename, basepath)
221
222 files_tried.append(os.path.join(basepath, filename))
223
224 raise Http404(
225 'File not found. Tried these files: %s' % ','.join(files_tried))
226
[end of readthedocs/core/views/serve.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/core/urls/subdomain.py b/readthedocs/core/urls/subdomain.py
--- a/readthedocs/core/urls/subdomain.py
+++ b/readthedocs/core/urls/subdomain.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
"""URL configurations for subdomains."""
from __future__ import absolute_import
@@ -10,7 +12,7 @@
from readthedocs.core.views.serve import (
redirect_page_with_filename,
- redirect_project_slug, serve_docs
+ redirect_project_slug, serve_docs, robots_txt,
)
from readthedocs.core.views import (
server_error_500,
@@ -22,6 +24,8 @@
handler404 = server_error_404
subdomain_urls = [
+ url(r'robots.txt$', robots_txt, name='robots_txt'),
+
url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'
r'page/(?P<filename>.*)$'.format(**pattern_opts),
redirect_page_with_filename,
diff --git a/readthedocs/core/views/serve.py b/readthedocs/core/views/serve.py
--- a/readthedocs/core/views/serve.py
+++ b/readthedocs/core/views/serve.py
@@ -223,3 +223,49 @@
raise Http404(
'File not found. Tried these files: %s' % ','.join(files_tried))
+
+
+@map_project_slug
+def robots_txt(request, project):
+ """
+ Serve custom user's defined ``/robots.txt``.
+
+ If the user added a ``robots.txt`` in the "default version" of the project,
+ we serve it directly.
+ """
+ # Use the ``robots.txt`` file from the default version configured
+ version_slug = project.get_default_version()
+ version = project.versions.get(slug=version_slug)
+
+ no_serve_robots_txt = any([
+ # If project is private or,
+ project.privacy_level == constants.PRIVATE,
+ # default version is private or,
+ version.privacy_level == constants.PRIVATE,
+ # default version is not active or,
+ not version.active,
+ # default version is not built
+ not version.built,
+ ])
+ if no_serve_robots_txt:
+ # ... we do return a 404
+ raise Http404()
+
+ filename = resolve_path(
+ project,
+ version_slug=version_slug,
+ filename='robots.txt',
+ subdomain=True, # subdomain will make it a "full" path without a URL prefix
+ )
+
+ # This breaks path joining, by ignoring the root when given an "absolute" path
+ if filename[0] == '/':
+ filename = filename[1:]
+
+ basepath = PublicSymlink(project).project_root
+ fullpath = os.path.join(basepath, filename)
+
+ if os.path.exists(fullpath):
+ return HttpResponse(open(fullpath).read(), content_type='text/plain')
+
+ return HttpResponse('User-agent: *\nAllow: /\n', content_type='text/plain')
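A rough way to exercise the new view outside the URL layer (a hypothetical snippet assuming a Django test environment and an existing public project; "pip" is a placeholder slug):

```
from django.test import RequestFactory
from readthedocs.core.views.serve import robots_txt

request = RequestFactory().get('/robots.txt')
response = robots_txt(request, project_slug='pip')
# Expect the project's own robots.txt when one was built for the default version,
# otherwise the permissive "User-agent: *\nAllow: /\n" fallback (404 if private).
print(response.status_code, response['Content-Type'])
```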
| {"golden_diff": "diff --git a/readthedocs/core/urls/subdomain.py b/readthedocs/core/urls/subdomain.py\n--- a/readthedocs/core/urls/subdomain.py\n+++ b/readthedocs/core/urls/subdomain.py\n@@ -1,3 +1,5 @@\n+# -*- coding: utf-8 -*-\n+\n \"\"\"URL configurations for subdomains.\"\"\"\n from __future__ import absolute_import\n \n@@ -10,7 +12,7 @@\n \n from readthedocs.core.views.serve import (\n redirect_page_with_filename,\n- redirect_project_slug, serve_docs\n+ redirect_project_slug, serve_docs, robots_txt,\n )\n from readthedocs.core.views import (\n server_error_500,\n@@ -22,6 +24,8 @@\n handler404 = server_error_404\n \n subdomain_urls = [\n+ url(r'robots.txt$', robots_txt, name='robots_txt'),\n+\n url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'page/(?P<filename>.*)$'.format(**pattern_opts),\n redirect_page_with_filename,\ndiff --git a/readthedocs/core/views/serve.py b/readthedocs/core/views/serve.py\n--- a/readthedocs/core/views/serve.py\n+++ b/readthedocs/core/views/serve.py\n@@ -223,3 +223,49 @@\n \n raise Http404(\n 'File not found. Tried these files: %s' % ','.join(files_tried))\n+\n+\n+@map_project_slug\n+def robots_txt(request, project):\n+ \"\"\"\n+ Serve custom user's defined ``/robots.txt``.\n+\n+ If the user added a ``robots.txt`` in the \"default version\" of the project,\n+ we serve it directly.\n+ \"\"\"\n+ # Use the ``robots.txt`` file from the default version configured\n+ version_slug = project.get_default_version()\n+ version = project.versions.get(slug=version_slug)\n+\n+ no_serve_robots_txt = any([\n+ # If project is private or,\n+ project.privacy_level == constants.PRIVATE,\n+ # default version is private or,\n+ version.privacy_level == constants.PRIVATE,\n+ # default version is not active or,\n+ not version.active,\n+ # default version is not built\n+ not version.built,\n+ ])\n+ if no_serve_robots_txt:\n+ # ... 
we do return a 404\n+ raise Http404()\n+\n+ filename = resolve_path(\n+ project,\n+ version_slug=version_slug,\n+ filename='robots.txt',\n+ subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n+ )\n+\n+ # This breaks path joining, by ignoring the root when given an \"absolute\" path\n+ if filename[0] == '/':\n+ filename = filename[1:]\n+\n+ basepath = PublicSymlink(project).project_root\n+ fullpath = os.path.join(basepath, filename)\n+\n+ if os.path.exists(fullpath):\n+ return HttpResponse(open(fullpath).read(), content_type='text/plain')\n+\n+ return HttpResponse('User-agent: *\\nAllow: /\\n', content_type='text/plain')\n", "issue": "Custom robots.txt support?\nWe've talked about blowing away the protected designation, so not sure if it makes sense to put special case on the protected privacy level, but maybe a separate option for docs that shouldn't be crawled?\n", "before_files": [{"content": "\"\"\"URL configurations for subdomains.\"\"\"\nfrom __future__ import absolute_import\n\nfrom functools import reduce\nfrom operator import add\n\nfrom django.conf.urls import url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom readthedocs.core.views.serve import (\n redirect_page_with_filename,\n redirect_project_slug, serve_docs\n)\nfrom readthedocs.core.views import (\n server_error_500,\n server_error_404,\n)\nfrom readthedocs.constants import pattern_opts\n\nhandler500 = server_error_500\nhandler404 = server_error_404\n\nsubdomain_urls = [\n url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'page/(?P<filename>.*)$'.format(**pattern_opts),\n redirect_page_with_filename,\n name='docs_detail'),\n\n url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)$').format(**pattern_opts),\n redirect_project_slug,\n name='redirect_project_slug'),\n\n url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'(?P<lang_slug>{lang_slug})/'\n r'(?P<version_slug>{version_slug})/'\n r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),\n serve_docs,\n name='docs_detail'),\n]\n\ngroups = [subdomain_urls]\n\n# Needed to serve media locally\nif getattr(settings, 'DEBUG', False):\n groups.insert(0, static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT))\n\nurlpatterns = reduce(add, groups)\n", "path": "readthedocs/core/urls/subdomain.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDoc serving from Python.\n\nIn production there are two modes,\n* Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)\n* Serving from private symlinks in Python (readthedocs.com only)\n\nIn development, we have two modes:\n* Serving from public symlinks in Python\n* Serving from private symlinks in Python\n\nThis means we should only serve from public symlinks in dev,\nand generally default to serving from private symlinks in Python only.\n\nPrivacy\n-------\n\nThese views will take into account the version privacy level.\n\nSettings\n--------\n\nPYTHON_MEDIA (False) - Set this to True to serve docs & media from Python\nSERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.\n\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport logging\nimport mimetypes\nimport os\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render\nfrom django.utils.encoding import iri_to_uri\nfrom 
django.views.static import serve\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.core.permissions import AdminPermission\nfrom readthedocs.core.resolver import resolve, resolve_path\nfrom readthedocs.core.symlink import PrivateSymlink, PublicSymlink\nfrom readthedocs.projects import constants\nfrom readthedocs.projects.models import Project, ProjectRelationship\n\nlog = logging.getLogger(__name__)\n\n\ndef map_subproject_slug(view_func):\n \"\"\"\n A decorator that maps a ``subproject_slug`` URL param into a Project.\n\n :raises: Http404 if the Project doesn't exist\n\n .. warning:: Does not take into account any kind of privacy settings.\n \"\"\"\n @wraps(view_func)\n def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa\n if subproject is None and subproject_slug:\n # Try to fetch by subproject alias first, otherwise we might end up\n # redirected to an unrelated project.\n try:\n # Depends on a project passed into kwargs\n rel = ProjectRelationship.objects.get(\n parent=kwargs['project'],\n alias=subproject_slug,\n )\n subproject = rel.child\n except (ProjectRelationship.DoesNotExist, KeyError):\n subproject = get_object_or_404(Project, slug=subproject_slug)\n return view_func(request, subproject=subproject, *args, **kwargs)\n\n return inner_view\n\n\ndef map_project_slug(view_func):\n \"\"\"\n A decorator that maps a ``project_slug`` URL param into a Project.\n\n :raises: Http404 if the Project doesn't exist\n\n .. warning:: Does not take into account any kind of privacy settings.\n \"\"\"\n @wraps(view_func)\n def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa\n if project is None:\n if not project_slug:\n project_slug = request.slug\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n raise Http404('Project does not exist.')\n return view_func(request, project=project, *args, **kwargs)\n\n return inner_view\n\n\n@map_project_slug\n@map_subproject_slug\ndef redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument\n \"\"\"Handle / -> /en/latest/ directs on subdomains.\"\"\"\n return HttpResponseRedirect(resolve(subproject or project))\n\n\n@map_project_slug\n@map_subproject_slug\ndef redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa\n \"\"\"Redirect /page/file.html to /en/latest/file.html.\"\"\"\n return HttpResponseRedirect(\n resolve(subproject or project, filename=filename))\n\n\ndef _serve_401(request, project):\n res = render(request, '401.html')\n res.status_code = 401\n log.debug('Unauthorized access to {0} documentation'.format(project.slug))\n return res\n\n\ndef _serve_file(request, filename, basepath):\n # Serve the file from the proper location\n if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):\n # Serve from Python\n return serve(request, filename, basepath)\n\n # Serve from Nginx\n content_type, encoding = mimetypes.guess_type(\n os.path.join(basepath, filename))\n content_type = content_type or 'application/octet-stream'\n response = HttpResponse(content_type=content_type)\n if encoding:\n response['Content-Encoding'] = encoding\n try:\n iri_path = os.path.join(\n basepath[len(settings.SITE_ROOT):],\n filename,\n )\n # NGINX does not support non-ASCII characters in the header, so we\n # convert the IRI path to URI so it's compatible with what NGINX expects\n # as the header value.\n # https://github.com/benoitc/gunicorn/issues/1448\n # 
https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling\n x_accel_redirect = iri_to_uri(iri_path)\n response['X-Accel-Redirect'] = x_accel_redirect\n except UnicodeEncodeError:\n raise Http404\n\n return response\n\n\n@map_project_slug\n@map_subproject_slug\ndef serve_docs(\n request, project, subproject, lang_slug=None, version_slug=None,\n filename=''):\n \"\"\"Exists to map existing proj, lang, version, filename views to the file format.\"\"\"\n if not version_slug:\n version_slug = project.get_default_version()\n try:\n version = project.versions.public(request.user).get(slug=version_slug)\n except Version.DoesNotExist:\n # Properly raise a 404 if the version doesn't exist (or is inactive) and\n # a 401 if it does\n if project.versions.filter(slug=version_slug, active=True).exists():\n return _serve_401(request, project)\n raise Http404('Version does not exist.')\n filename = resolve_path(\n subproject or project, # Resolve the subproject if it exists\n version_slug=version_slug,\n language=lang_slug,\n filename=filename,\n subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n )\n if (version.privacy_level == constants.PRIVATE and\n not AdminPermission.is_member(user=request.user, obj=project)):\n return _serve_401(request, project)\n return _serve_symlink_docs(\n request,\n filename=filename,\n project=project,\n privacy_level=version.privacy_level,\n )\n\n\n@map_project_slug\ndef _serve_symlink_docs(request, project, privacy_level, filename=''):\n \"\"\"Serve a file by symlink, or a 404 if not found.\"\"\"\n # Handle indexes\n if filename == '' or filename[-1] == '/':\n filename += 'index.html'\n\n # This breaks path joining, by ignoring the root when given an \"absolute\" path\n if filename[0] == '/':\n filename = filename[1:]\n\n log.info('Serving %s for %s', filename, project)\n\n files_tried = []\n\n serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])\n\n if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa\n public_symlink = PublicSymlink(project)\n basepath = public_symlink.project_root\n if os.path.exists(os.path.join(basepath, filename)):\n return _serve_file(request, filename, basepath)\n\n files_tried.append(os.path.join(basepath, filename))\n\n if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa\n # Handle private\n private_symlink = PrivateSymlink(project)\n basepath = private_symlink.project_root\n\n if os.path.exists(os.path.join(basepath, filename)):\n return _serve_file(request, filename, basepath)\n\n files_tried.append(os.path.join(basepath, filename))\n\n raise Http404(\n 'File not found. Tried these files: %s' % ','.join(files_tried))\n", "path": "readthedocs/core/views/serve.py"}]} | 3,482 | 713 |
gh_patches_debug_13364 | rasdani/github-patches | git_diff | lutris__lutris-3987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installer didn't start: errors in .py
I want to install some games like KOTOR with Steam but get the same error every time.
File "/usr/lib/python3.10/site-packages/lutris/gui/installerwindow.py", line 155, in on_installer_selected
self.interpreter = interpreter.ScriptInterpreter(
File "/usr/lib/python3.10/site-packages/lutris/installer/interpreter.py", line 50, in __init__
self.installer = LutrisInstaller(installer, self, service=self.service, appid=self.appid)
File "/usr/lib/python3.10/site-packages/lutris/installer/installer.py", line 31, in __init__
self.service = self.get_service(initial=service)
File "/usr/lib/python3.10/site-packages/lutris/installer/installer.py", line 47, in get_service
return SERVICES["steam"]()
KeyError: 'steam'
</issue>
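For context, the `KeyError` in the traceback comes from indexing the `SERVICES` registry directly on a system where no Steam service is registered; the patch further down guards each lookup with an `in` check. A minimal, self-contained illustration of the failure mode (hypothetical registry contents, not actual Lutris code):

```
# Hypothetical SERVICES registry with no "steam" entry registered.
SERVICES = {"gog": object, "humblebundle": object}

try:
    SERVICES["steam"]()   # mirrors installer.py line 47 -> KeyError: 'steam'
except KeyError as err:
    print("KeyError:", err)

# A guarded lookup, as used by the fix, avoids the crash when the service is absent.
if "steam" in SERVICES:
    SERVICES["steam"]()
```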
<code>
[start of lutris/installer/installer.py]
1 """Lutris installer class"""
2 import json
3 import os
4 from gettext import gettext as _
5
6 from lutris.config import LutrisConfig, write_game_config
7 from lutris.database.games import add_or_update, get_game_by_field
8 from lutris.game import Game
9 from lutris.installer import AUTO_ELF_EXE, AUTO_WIN32_EXE
10 from lutris.installer.errors import ScriptingError
11 from lutris.installer.installer_file import InstallerFile
12 from lutris.installer.legacy import get_game_launcher
13 from lutris.runners import import_runner
14 from lutris.services import SERVICES
15 from lutris.util.game_finder import find_linux_game_executable, find_windows_game_executable
16 from lutris.util.log import logger
17
18
19 class LutrisInstaller: # pylint: disable=too-many-instance-attributes
20 """Represents a Lutris installer"""
21
22 def __init__(self, installer, interpreter, service, appid):
23 self.interpreter = interpreter
24 self.installer = installer
25 self.version = installer["version"]
26 self.slug = installer["slug"]
27 self.year = installer.get("year")
28 self.runner = installer["runner"]
29 self.script = installer.get("script")
30 self.game_name = installer["name"]
31 self.game_slug = installer["game_slug"]
32 self.service = self.get_service(initial=service)
33 self.service_appid = self.get_appid(installer, initial=appid)
34 self.variables = installer.get("variables", {})
35 self.files = [
36 InstallerFile(self.game_slug, file_id, file_meta)
37 for file_desc in self.script.get("files", [])
38 for file_id, file_meta in file_desc.items()
39 ]
40 self.requires = self.script.get("requires")
41 self.extends = self.script.get("extends")
42 self.game_id = self.get_game_id()
43
44 def get_service(self, initial=None):
45 if initial:
46 return initial
47 if "steam" in self.runner:
48 return SERVICES["steam"]()
49 version = self.version.lower()
50 if "humble" in version:
51 return SERVICES["humblebundle"]()
52 if "gog" in version:
53 return SERVICES["gog"]()
54
55 def get_appid(self, installer, initial=None):
56 if initial:
57 return initial
58 if not self.service:
59 return
60 if self.service.id == "steam":
61 return installer.get("steamid")
62 game_config = self.script.get("game", {})
63 if self.service.id == "gog":
64 return game_config.get("gogid") or installer.get("gogid")
65 if self.service.id == "humblebundle":
66 return game_config.get("humbleid") or installer.get("humblestoreid")
67
68 @property
69 def script_pretty(self):
70 """Return a pretty print of the script"""
71 return json.dumps(self.script, indent=4)
72
73 def get_game_id(self):
74 """Return the ID of the game in the local DB if one exists"""
75 # If the game is in the library and uninstalled, the first installation
76 # updates it
77 existing_game = get_game_by_field(self.game_slug, "slug")
78 if existing_game and not existing_game["installed"]:
79 return existing_game["id"]
80
81 @property
82 def creates_game_folder(self):
83 """Determines if an install script should create a game folder for the game"""
84 if self.requires:
85 # Game is an extension of an existing game, folder exists
86 return False
87 if self.runner == "steam":
88 # Steam games installs in their steamapps directory
89 return False
90 if (
91 self.files
92 or self.script.get("game", {}).get("gog")
93 or self.script.get("game", {}).get("prefix")
94 ):
95 return True
96 command_names = [list(c.keys())[0] for c in self.script.get("installer", [])]
97 if "insert-disc" in command_names:
98 return True
99 return False
100
101 def get_errors(self):
102 """Return potential errors in the script"""
103 errors = []
104 if not isinstance(self.script, dict):
105 errors.append("Script must be a dictionary")
106 # Return early since the method assumes a dict
107 return errors
108
109 # Check that installers contains all required fields
110 for field in ("runner", "game_name", "game_slug"):
111 if not hasattr(self, field) or not getattr(self, field):
112 errors.append("Missing field '%s'" % field)
113
114 # Check that libretro installers have a core specified
115 if self.runner == "libretro":
116 if "game" not in self.script or "core" not in self.script["game"]:
117 errors.append("Missing libretro core in game section")
118
119 # Check that Steam games have an AppID
120 if self.runner == "steam":
121 if not self.script.get("game", {}).get("appid"):
122 errors.append("Missing appid for Steam game")
123
124 # Check that installers don't contain both 'requires' and 'extends'
125 if self.script.get("requires") and self.script.get("extends"):
126 errors.append("Scripts can't have both extends and requires")
127 return errors
128
129 def pop_user_provided_file(self):
130 """Return and remove the first user provided file, which is used for game stores"""
131 for index, file in enumerate(self.files):
132 if file.url.startswith("N/A"):
133 self.files.pop(index)
134 return file.id
135
136 def prepare_game_files(self):
137 """Gathers necessary files before iterating through them."""
138 if not self.files:
139 return
140 if self.service:
141 if self.service.online and not self.service.is_connected():
142 logger.info("Not authenticated to %s", self.service.id)
143 return
144 installer_file_id = self.pop_user_provided_file()
145 if not installer_file_id:
146 logger.warning("Could not find a file for this service")
147 return
148 if self.service.has_extras:
149 self.service.selected_extras = self.interpreter.extras
150 installer_files = self.service.get_installer_files(self, installer_file_id)
151 for installer_file in installer_files:
152 self.files.append(installer_file)
153 if not installer_files:
154 # Failed to get the service game, put back a user provided file
155 self.files.insert(0, "N/A: Provider installer file")
156
157 def _substitute_config(self, script_config):
158 """Substitute values such as $GAMEDIR in a config dict."""
159 config = {}
160 for key in script_config:
161 if not isinstance(key, str):
162 raise ScriptingError(_("Game config key must be a string"), key)
163 value = script_config[key]
164 if str(value).lower() == 'true':
165 value = True
166 if str(value).lower() == 'false':
167 value = False
168 if isinstance(value, list):
169 config[key] = [self.interpreter._substitute(i) for i in value]
170 elif isinstance(value, dict):
171 config[key] = {k: self.interpreter._substitute(v) for (k, v) in value.items()}
172 elif isinstance(value, bool):
173 config[key] = value
174 else:
175 config[key] = self.interpreter._substitute(value)
176 return config
177
178 def get_game_config(self):
179 """Return the game configuration"""
180 if self.requires:
181 # Load the base game config
182 required_game = get_game_by_field(self.requires, field="installer_slug")
183 if not required_game:
184 required_game = get_game_by_field(self.requires, field="slug")
185 if not required_game:
186 raise ValueError("No game matched '%s' on installer_slug or slug" % self.requires)
187 base_config = LutrisConfig(
188 runner_slug=self.runner, game_config_id=required_game["configpath"]
189 )
190 config = base_config.game_level
191 else:
192 config = {"game": {}}
193
194 # Config update
195 if "system" in self.script:
196 config["system"] = self._substitute_config(self.script["system"])
197 if self.runner in self.script and self.script[self.runner]:
198 config[self.runner] = self._substitute_config(self.script[self.runner])
199 launcher, launcher_config = self.get_game_launcher_config(self.interpreter.game_files)
200 if launcher:
201 config["game"][launcher] = launcher_config
202
203 if "game" in self.script:
204 try:
205 config["game"].update(self.script["game"])
206 except ValueError as err:
207 raise ScriptingError(_("Invalid 'game' section"), self.script["game"]) from err
208 config["game"] = self._substitute_config(config["game"])
209 if AUTO_ELF_EXE in config["game"].get("exe", ""):
210 config["game"]["exe"] = find_linux_game_executable(self.interpreter.target_path,
211 make_executable=True)
212 elif AUTO_WIN32_EXE in config["game"].get("exe", ""):
213 config["game"]["exe"] = find_windows_game_executable(self.interpreter.target_path)
214 return config
215
216 def save(self):
217 """Write the game configuration in the DB and config file"""
218 if self.extends:
219 logger.info(
220 "This is an extension to %s, not creating a new game entry",
221 self.extends,
222 )
223 return
224 configpath = write_game_config(self.slug, self.get_game_config())
225 runner_inst = import_runner(self.runner)()
226 if self.service:
227 service_id = self.service.id
228 else:
229 service_id = None
230 self.game_id = add_or_update(
231 name=self.game_name,
232 runner=self.runner,
233 slug=self.game_slug,
234 platform=runner_inst.get_platform(),
235 directory=self.interpreter.target_path,
236 installed=1,
237 hidden=0,
238 installer_slug=self.slug,
239 parent_slug=self.requires,
240 year=self.year,
241 configpath=configpath,
242 service=service_id,
243 service_id=self.service_appid,
244 id=self.game_id,
245 )
246 # This is a bit redundant but used to trigger the game-updated signal
247 game = Game(self.game_id)
248 game.save()
249
250 def get_game_launcher_config(self, game_files):
251 """Game options such as exe or main_file can be added at the root of the
252 script as a shortcut, this integrates them into the game config properly
253 This should be deprecated. Game launchers should go in the game section.
254 """
255 launcher, launcher_value = get_game_launcher(self.script)
256 if isinstance(launcher_value, list):
257 launcher_values = []
258 for game_file in launcher_value:
259 if game_file in game_files:
260 launcher_values.append(game_files[game_file])
261 else:
262 launcher_values.append(game_file)
263 return launcher, launcher_values
264 if launcher_value:
265 if launcher_value in game_files:
266 launcher_value = game_files[launcher_value]
267 elif self.interpreter.target_path and os.path.exists(
268 os.path.join(self.interpreter.target_path, launcher_value)
269 ):
270 launcher_value = os.path.join(self.interpreter.target_path, launcher_value)
271 return launcher, launcher_value
272
[end of lutris/installer/installer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/installer/installer.py b/lutris/installer/installer.py
--- a/lutris/installer/installer.py
+++ b/lutris/installer/installer.py
@@ -44,12 +44,12 @@
def get_service(self, initial=None):
if initial:
return initial
- if "steam" in self.runner:
+ if "steam" in self.runner and "steam" in SERVICES:
return SERVICES["steam"]()
version = self.version.lower()
- if "humble" in version:
+ if "humble" in version and "humblebundle" in SERVICES:
return SERVICES["humblebundle"]()
- if "gog" in version:
+ if "gog" in version and "gog" in SERVICES:
return SERVICES["gog"]()
def get_appid(self, installer, initial=None):
| {"golden_diff": "diff --git a/lutris/installer/installer.py b/lutris/installer/installer.py\n--- a/lutris/installer/installer.py\n+++ b/lutris/installer/installer.py\n@@ -44,12 +44,12 @@\n def get_service(self, initial=None):\n if initial:\n return initial\n- if \"steam\" in self.runner:\n+ if \"steam\" in self.runner and \"steam\" in SERVICES:\n return SERVICES[\"steam\"]()\n version = self.version.lower()\n- if \"humble\" in version:\n+ if \"humble\" in version and \"humblebundle\" in SERVICES:\n return SERVICES[\"humblebundle\"]()\n- if \"gog\" in version:\n+ if \"gog\" in version and \"gog\" in SERVICES:\n return SERVICES[\"gog\"]()\n \n def get_appid(self, installer, initial=None):\n", "issue": "Installer didnt Start Errors in .py\ni want to install some stuff like kotor with steam but get everytime the same error.\r\n\r\nFile \"/usr/lib/python3.10/site-packages/lutris/gui/installerwindow.py\", line 155, in on_installer_selected\r\n self.interpreter = interpreter.ScriptInterpreter(\r\n File \"/usr/lib/python3.10/site-packages/lutris/installer/interpreter.py\", line 50, in __init__\r\n self.installer = LutrisInstaller(installer, self, service=self.service, appid=self.appid)\r\n File \"/usr/lib/python3.10/site-packages/lutris/installer/installer.py\", line 31, in __init__\r\n self.service = self.get_service(initial=service)\r\n File \"/usr/lib/python3.10/site-packages/lutris/installer/installer.py\", line 47, in get_service\r\n return SERVICES[\"steam\"]()\r\nKeyError: 'steam'\r\n\n", "before_files": [{"content": "\"\"\"Lutris installer class\"\"\"\nimport json\nimport os\nfrom gettext import gettext as _\n\nfrom lutris.config import LutrisConfig, write_game_config\nfrom lutris.database.games import add_or_update, get_game_by_field\nfrom lutris.game import Game\nfrom lutris.installer import AUTO_ELF_EXE, AUTO_WIN32_EXE\nfrom lutris.installer.errors import ScriptingError\nfrom lutris.installer.installer_file import InstallerFile\nfrom lutris.installer.legacy import get_game_launcher\nfrom lutris.runners import import_runner\nfrom lutris.services import SERVICES\nfrom lutris.util.game_finder import find_linux_game_executable, find_windows_game_executable\nfrom lutris.util.log import logger\n\n\nclass LutrisInstaller: # pylint: disable=too-many-instance-attributes\n \"\"\"Represents a Lutris installer\"\"\"\n\n def __init__(self, installer, interpreter, service, appid):\n self.interpreter = interpreter\n self.installer = installer\n self.version = installer[\"version\"]\n self.slug = installer[\"slug\"]\n self.year = installer.get(\"year\")\n self.runner = installer[\"runner\"]\n self.script = installer.get(\"script\")\n self.game_name = installer[\"name\"]\n self.game_slug = installer[\"game_slug\"]\n self.service = self.get_service(initial=service)\n self.service_appid = self.get_appid(installer, initial=appid)\n self.variables = installer.get(\"variables\", {})\n self.files = [\n InstallerFile(self.game_slug, file_id, file_meta)\n for file_desc in self.script.get(\"files\", [])\n for file_id, file_meta in file_desc.items()\n ]\n self.requires = self.script.get(\"requires\")\n self.extends = self.script.get(\"extends\")\n self.game_id = self.get_game_id()\n\n def get_service(self, initial=None):\n if initial:\n return initial\n if \"steam\" in self.runner:\n return SERVICES[\"steam\"]()\n version = self.version.lower()\n if \"humble\" in version:\n return SERVICES[\"humblebundle\"]()\n if \"gog\" in version:\n return SERVICES[\"gog\"]()\n\n def get_appid(self, installer, 
initial=None):\n if initial:\n return initial\n if not self.service:\n return\n if self.service.id == \"steam\":\n return installer.get(\"steamid\")\n game_config = self.script.get(\"game\", {})\n if self.service.id == \"gog\":\n return game_config.get(\"gogid\") or installer.get(\"gogid\")\n if self.service.id == \"humblebundle\":\n return game_config.get(\"humbleid\") or installer.get(\"humblestoreid\")\n\n @property\n def script_pretty(self):\n \"\"\"Return a pretty print of the script\"\"\"\n return json.dumps(self.script, indent=4)\n\n def get_game_id(self):\n \"\"\"Return the ID of the game in the local DB if one exists\"\"\"\n # If the game is in the library and uninstalled, the first installation\n # updates it\n existing_game = get_game_by_field(self.game_slug, \"slug\")\n if existing_game and not existing_game[\"installed\"]:\n return existing_game[\"id\"]\n\n @property\n def creates_game_folder(self):\n \"\"\"Determines if an install script should create a game folder for the game\"\"\"\n if self.requires:\n # Game is an extension of an existing game, folder exists\n return False\n if self.runner == \"steam\":\n # Steam games installs in their steamapps directory\n return False\n if (\n self.files\n or self.script.get(\"game\", {}).get(\"gog\")\n or self.script.get(\"game\", {}).get(\"prefix\")\n ):\n return True\n command_names = [list(c.keys())[0] for c in self.script.get(\"installer\", [])]\n if \"insert-disc\" in command_names:\n return True\n return False\n\n def get_errors(self):\n \"\"\"Return potential errors in the script\"\"\"\n errors = []\n if not isinstance(self.script, dict):\n errors.append(\"Script must be a dictionary\")\n # Return early since the method assumes a dict\n return errors\n\n # Check that installers contains all required fields\n for field in (\"runner\", \"game_name\", \"game_slug\"):\n if not hasattr(self, field) or not getattr(self, field):\n errors.append(\"Missing field '%s'\" % field)\n\n # Check that libretro installers have a core specified\n if self.runner == \"libretro\":\n if \"game\" not in self.script or \"core\" not in self.script[\"game\"]:\n errors.append(\"Missing libretro core in game section\")\n\n # Check that Steam games have an AppID\n if self.runner == \"steam\":\n if not self.script.get(\"game\", {}).get(\"appid\"):\n errors.append(\"Missing appid for Steam game\")\n\n # Check that installers don't contain both 'requires' and 'extends'\n if self.script.get(\"requires\") and self.script.get(\"extends\"):\n errors.append(\"Scripts can't have both extends and requires\")\n return errors\n\n def pop_user_provided_file(self):\n \"\"\"Return and remove the first user provided file, which is used for game stores\"\"\"\n for index, file in enumerate(self.files):\n if file.url.startswith(\"N/A\"):\n self.files.pop(index)\n return file.id\n\n def prepare_game_files(self):\n \"\"\"Gathers necessary files before iterating through them.\"\"\"\n if not self.files:\n return\n if self.service:\n if self.service.online and not self.service.is_connected():\n logger.info(\"Not authenticated to %s\", self.service.id)\n return\n installer_file_id = self.pop_user_provided_file()\n if not installer_file_id:\n logger.warning(\"Could not find a file for this service\")\n return\n if self.service.has_extras:\n self.service.selected_extras = self.interpreter.extras\n installer_files = self.service.get_installer_files(self, installer_file_id)\n for installer_file in installer_files:\n self.files.append(installer_file)\n if not installer_files:\n # 
Failed to get the service game, put back a user provided file\n self.files.insert(0, \"N/A: Provider installer file\")\n\n def _substitute_config(self, script_config):\n \"\"\"Substitute values such as $GAMEDIR in a config dict.\"\"\"\n config = {}\n for key in script_config:\n if not isinstance(key, str):\n raise ScriptingError(_(\"Game config key must be a string\"), key)\n value = script_config[key]\n if str(value).lower() == 'true':\n value = True\n if str(value).lower() == 'false':\n value = False\n if isinstance(value, list):\n config[key] = [self.interpreter._substitute(i) for i in value]\n elif isinstance(value, dict):\n config[key] = {k: self.interpreter._substitute(v) for (k, v) in value.items()}\n elif isinstance(value, bool):\n config[key] = value\n else:\n config[key] = self.interpreter._substitute(value)\n return config\n\n def get_game_config(self):\n \"\"\"Return the game configuration\"\"\"\n if self.requires:\n # Load the base game config\n required_game = get_game_by_field(self.requires, field=\"installer_slug\")\n if not required_game:\n required_game = get_game_by_field(self.requires, field=\"slug\")\n if not required_game:\n raise ValueError(\"No game matched '%s' on installer_slug or slug\" % self.requires)\n base_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=required_game[\"configpath\"]\n )\n config = base_config.game_level\n else:\n config = {\"game\": {}}\n\n # Config update\n if \"system\" in self.script:\n config[\"system\"] = self._substitute_config(self.script[\"system\"])\n if self.runner in self.script and self.script[self.runner]:\n config[self.runner] = self._substitute_config(self.script[self.runner])\n launcher, launcher_config = self.get_game_launcher_config(self.interpreter.game_files)\n if launcher:\n config[\"game\"][launcher] = launcher_config\n\n if \"game\" in self.script:\n try:\n config[\"game\"].update(self.script[\"game\"])\n except ValueError as err:\n raise ScriptingError(_(\"Invalid 'game' section\"), self.script[\"game\"]) from err\n config[\"game\"] = self._substitute_config(config[\"game\"])\n if AUTO_ELF_EXE in config[\"game\"].get(\"exe\", \"\"):\n config[\"game\"][\"exe\"] = find_linux_game_executable(self.interpreter.target_path,\n make_executable=True)\n elif AUTO_WIN32_EXE in config[\"game\"].get(\"exe\", \"\"):\n config[\"game\"][\"exe\"] = find_windows_game_executable(self.interpreter.target_path)\n return config\n\n def save(self):\n \"\"\"Write the game configuration in the DB and config file\"\"\"\n if self.extends:\n logger.info(\n \"This is an extension to %s, not creating a new game entry\",\n self.extends,\n )\n return\n configpath = write_game_config(self.slug, self.get_game_config())\n runner_inst = import_runner(self.runner)()\n if self.service:\n service_id = self.service.id\n else:\n service_id = None\n self.game_id = add_or_update(\n name=self.game_name,\n runner=self.runner,\n slug=self.game_slug,\n platform=runner_inst.get_platform(),\n directory=self.interpreter.target_path,\n installed=1,\n hidden=0,\n installer_slug=self.slug,\n parent_slug=self.requires,\n year=self.year,\n configpath=configpath,\n service=service_id,\n service_id=self.service_appid,\n id=self.game_id,\n )\n # This is a bit redundant but used to trigger the game-updated signal\n game = Game(self.game_id)\n game.save()\n\n def get_game_launcher_config(self, game_files):\n \"\"\"Game options such as exe or main_file can be added at the root of the\n script as a shortcut, this integrates them into the game config properly\n 
This should be deprecated. Game launchers should go in the game section.\n \"\"\"\n launcher, launcher_value = get_game_launcher(self.script)\n if isinstance(launcher_value, list):\n launcher_values = []\n for game_file in launcher_value:\n if game_file in game_files:\n launcher_values.append(game_files[game_file])\n else:\n launcher_values.append(game_file)\n return launcher, launcher_values\n if launcher_value:\n if launcher_value in game_files:\n launcher_value = game_files[launcher_value]\n elif self.interpreter.target_path and os.path.exists(\n os.path.join(self.interpreter.target_path, launcher_value)\n ):\n launcher_value = os.path.join(self.interpreter.target_path, launcher_value)\n return launcher, launcher_value\n", "path": "lutris/installer/installer.py"}]} | 3,816 | 200 |
gh_patches_debug_14262 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The 'user' trait of a SingleUserNotebookApp instance must be a unicode string, but a value of 1384482 <class 'int'> was specified.
I'm using the `SudoSpawner` in combination with Active Directory+PAM based authentication. This results in users being created on the Linux server with numeric IDs that map to AD users. This works fine with users whose AD logins are name-based, but users with numeric IDs cannot log in.
Looking at the logs I've found that the error occurs when trying to launch the instance of `SingleUserNotebookApp` due to the user ID being interpreted as an `int` rather than `str`.
```
The 'user' trait of a SingleUserNotebookApp instance must be a unicode string,
but a value of 1384482 <class 'int'> was specified.
mfitzp@bms-jupyter:~$ /usr/bin/python3 -m jupyterhub.singleuser --user="1384482"
--port=36359 --cookie-name=jupyter-hub-token-1384482 --base-url=/user/1384482
--hub-prefix=/hub/ --hub-api-url=http://localhost:8081/hub/api --ip=localhost
```
Looking at the source, the `user` property is defined as a `Unicode` traitlet, so I'm unsure what is going on here. Is this perhaps an IPython issue? In `argparse` it's possible to define the type for command-line arguments to be mapped to, but I could not find the equivalent in the traitlet system.
</issue>
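To make the type mismatch concrete, here is a minimal, self-contained reproduction using the IPython 3.x traitlets API that `singleuser.py` imports (an illustration of the traitlets behaviour, not the JupyterHub fix):

```
# A Unicode trait rejects int values instead of coercing them to str.
from IPython.utils.traitlets import HasTraits, Unicode

class Example(HasTraits):
    user = Unicode('')

app = Example()
app.user = "1384482"       # accepted: already a str
try:
    app.user = 1384482     # raises TraitError, matching the message in the issue
except Exception as err:
    print(type(err).__name__, err)

# A defensive workaround at the call site is to coerce explicitly, e.g. str(user_id).
```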
<code>
[start of jupyterhub/singleuser.py]
1 #!/usr/bin/env python
2 """Extend regular notebook server to be aware of multiuser things."""
3
4 # Copyright (c) Jupyter Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 import os
8 from urllib.parse import quote
9
10 import requests
11
12 from tornado import ioloop
13 from tornado.web import HTTPError
14
15 from IPython.utils.traitlets import (
16 Integer,
17 Unicode,
18 )
19
20 from IPython.html.notebookapp import NotebookApp
21 from IPython.html.auth.login import LoginHandler
22 from IPython.html.auth.logout import LogoutHandler
23
24 from IPython.html.utils import url_path_join
25
26
27 from distutils.version import LooseVersion as V
28
29 import IPython
30 if V(IPython.__version__) < V('3.0'):
31 raise ImportError("JupyterHub Requires IPython >= 3.0, found %s" % IPython.__version__)
32
33 # Define two methods to attach to AuthenticatedHandler,
34 # which authenticate via the central auth server.
35
36 class JupyterHubLoginHandler(LoginHandler):
37 @staticmethod
38 def login_available(settings):
39 return True
40
41 @staticmethod
42 def verify_token(self, cookie_name, encrypted_cookie):
43 """method for token verification"""
44 cookie_cache = self.settings['cookie_cache']
45 if encrypted_cookie in cookie_cache:
46 # we've seen this token before, don't ask upstream again
47 return cookie_cache[encrypted_cookie]
48
49 hub_api_url = self.settings['hub_api_url']
50 hub_api_key = self.settings['hub_api_key']
51 r = requests.get(url_path_join(
52 hub_api_url, "authorizations/cookie", cookie_name, quote(encrypted_cookie, safe=''),
53 ),
54 headers = {'Authorization' : 'token %s' % hub_api_key},
55 )
56 if r.status_code == 404:
57 data = None
58 elif r.status_code == 403:
59 self.log.error("I don't have permission to verify cookies, my auth token may have expired: [%i] %s", r.status_code, r.reason)
60 raise HTTPError(500, "Permission failure checking authorization, I may need to be restarted")
61 elif r.status_code >= 500:
62 self.log.error("Upstream failure verifying auth token: [%i] %s", r.status_code, r.reason)
63 raise HTTPError(502, "Failed to check authorization (upstream problem)")
64 elif r.status_code >= 400:
65 self.log.warn("Failed to check authorization: [%i] %s", r.status_code, r.reason)
66 raise HTTPError(500, "Failed to check authorization")
67 else:
68 data = r.json()
69 cookie_cache[encrypted_cookie] = data
70 return data
71
72 @staticmethod
73 def get_user(self):
74 """alternative get_current_user to query the central server"""
75 # only allow this to be called once per handler
76 # avoids issues if an error is raised,
77 # since this may be called again when trying to render the error page
78 if hasattr(self, '_cached_user'):
79 return self._cached_user
80
81 self._cached_user = None
82 my_user = self.settings['user']
83 encrypted_cookie = self.get_cookie(self.cookie_name)
84 if encrypted_cookie:
85 auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)
86 if not auth_data:
87 # treat invalid token the same as no token
88 return None
89 user = auth_data['user']
90 if user == my_user:
91 self._cached_user = user
92 return user
93 else:
94 return None
95 else:
96 self.log.debug("No token cookie")
97 return None
98
99
100 class JupyterHubLogoutHandler(LogoutHandler):
101 def get(self):
102 self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))
103
104
105 # register new hub related command-line aliases
106 aliases = NotebookApp.aliases.get_default_value()
107 aliases.update({
108 'user' : 'SingleUserNotebookApp.user',
109 'cookie-name': 'SingleUserNotebookApp.cookie_name',
110 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',
111 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',
112 'base-url': 'SingleUserNotebookApp.base_url',
113 })
114
115 class SingleUserNotebookApp(NotebookApp):
116 """A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
117 user = Unicode(config=True)
118 def _user_changed(self, name, old, new):
119 self.log.name = new
120 cookie_name = Unicode(config=True)
121 hub_prefix = Unicode(config=True)
122 hub_api_url = Unicode(config=True)
123 aliases = aliases
124 open_browser = False
125 login_handler_class = JupyterHubLoginHandler
126 logout_handler_class = JupyterHubLogoutHandler
127
128 cookie_cache_lifetime = Integer(
129 config=True,
130 default_value=300,
131 allow_none=True,
132 help="""
133 Time, in seconds, that we cache a validated cookie before requiring
134 revalidation with the hub.
135 """,
136 )
137
138 def _log_datefmt_default(self):
139 """Exclude date from default date format"""
140 return "%Y-%m-%d %H:%M:%S"
141
142 def _log_format_default(self):
143 """override default log format to include time"""
144 return "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
145
146 def _confirm_exit(self):
147 # disable the exit confirmation for background notebook processes
148 ioloop.IOLoop.instance().stop()
149
150 def _clear_cookie_cache(self):
151 self.log.info("Clearing cookie cache")
152 self.tornado_settings['cookie_cache'].clear()
153
154 def initialize(self, argv=None):
155 super().initialize(argv=argv)
156
157 # Start a PeriodicCallback to clear cached cookies. This forces us to
158 # revalidate our user with the Hub at least every
159 # `cookie_cache_lifetime` seconds.
160 if self.cookie_cache_lifetime:
161 ioloop.PeriodicCallback(
162 self._clear_cookie_cache,
163 self.cookie_cache_lifetime * 1e3,
164 ).start()
165
166 def init_webapp(self):
167 # load the hub related settings into the tornado settings dict
168 env = os.environ
169 s = self.tornado_settings
170 s['cookie_cache'] = {}
171 s['user'] = self.user
172 s['hub_api_key'] = env.pop('JPY_API_TOKEN')
173 s['hub_prefix'] = self.hub_prefix
174 s['cookie_name'] = self.cookie_name
175 s['login_url'] = url_path_join(self.hub_prefix, 'login')
176 s['hub_api_url'] = self.hub_api_url
177 super(SingleUserNotebookApp, self).init_webapp()
178
179
180 def main():
181 return SingleUserNotebookApp.launch_instance()
182
183
184 if __name__ == "__main__":
185 main()
186
[end of jupyterhub/singleuser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py
--- a/jupyterhub/singleuser.py
+++ b/jupyterhub/singleuser.py
@@ -15,6 +15,7 @@
from IPython.utils.traitlets import (
Integer,
Unicode,
+ CUnicode,
)
from IPython.html.notebookapp import NotebookApp
@@ -114,7 +115,7 @@
class SingleUserNotebookApp(NotebookApp):
"""A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
- user = Unicode(config=True)
+ user = CUnicode(config=True)
def _user_changed(self, name, old, new):
self.log.name = new
cookie_name = Unicode(config=True)
| {"golden_diff": "diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py\n--- a/jupyterhub/singleuser.py\n+++ b/jupyterhub/singleuser.py\n@@ -15,6 +15,7 @@\n from IPython.utils.traitlets import (\n Integer,\n Unicode,\n+ CUnicode,\n )\n \n from IPython.html.notebookapp import NotebookApp\n@@ -114,7 +115,7 @@\n \n class SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n- user = Unicode(config=True)\n+ user = CUnicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n", "issue": "The 'user' trait of a SingleUserNotebookApp instance must be a unicode string, but a value of 1384482<class 'int'> was specified.\nI'm using the `SudoSpawner` in combination with Active Directory+PAM based authentication. This results in users being created on the Linux server with numeric IDs that map to AD users. This works fine with users whos AD logins are name-based, but users with numeric IDs cannot log in.\n\nLooking at the logs I've found that the error occurs when trying to launch the instance of `SingleUserNotebookApp` due to the user ID being interpreted as an `int` rather than `str`. \n\n```\nThe 'user' trait of a SingleUserNotebookApp instance must be a unicode string, \n but a value of 1384482 <class 'int'> was specified.\nmfitzp@bms-jupyter:~$ /usr/bin/python3 -m jupyterhub.singleuser --user=\"1384482\" \n --port=36359 --cookie-name=jupyter-hub-token-1384482 --base-url=/user/1384482\n --hub-prefix=/hub/ --hub-api-url=http://localhost:8081/hub/api --ip=localhost \n```\n\nLooking at the source the `user` property is defined as a `Unicode` traitlet so I'm unsure what is going on here? Is this perhaps a IPython issue? 
In `argparse` it's possible to define the type for command-line arguments to be mapped to, but I could not find the equivalent in the traitlet system.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nfrom urllib.parse import quote\n\nimport requests\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom IPython.utils.traitlets import (\n Integer,\n Unicode,\n)\n\nfrom IPython.html.notebookapp import NotebookApp\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name, quote(encrypted_cookie, safe=''),\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n )\n if r.status_code == 404:\n data = None\n elif r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['user']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line 
aliases\naliases = NotebookApp.aliases.get_default_value()\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = Unicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n\n cookie_cache_lifetime = Integer(\n config=True,\n default_value=300,\n allow_none=True,\n help=\"\"\"\n Time, in seconds, that we cache a validated cookie before requiring\n revalidation with the hub.\n \"\"\",\n )\n\n def _log_datefmt_default(self):\n \"\"\"Exclude date from default date format\"\"\"\n return \"%Y-%m-%d %H:%M:%S\"\n\n def _log_format_default(self):\n \"\"\"override default log format to include time\"\"\"\n return \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s\"\n\n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n\n def _clear_cookie_cache(self):\n self.log.info(\"Clearing cookie cache\")\n self.tornado_settings['cookie_cache'].clear()\n\n def initialize(self, argv=None):\n super().initialize(argv=argv)\n\n # Start a PeriodicCallback to clear cached cookies. This forces us to\n # revalidate our user with the Hub at least every\n # `cookie_cache_lifetime` seconds.\n if self.cookie_cache_lifetime:\n ioloop.PeriodicCallback(\n self._clear_cookie_cache,\n self.cookie_cache_lifetime * 1e3,\n ).start()\n\n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = url_path_join(self.hub_prefix, 'login')\n s['hub_api_url'] = self.hub_api_url\n super(SingleUserNotebookApp, self).init_webapp()\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py"}]} | 2,857 | 172 |
gh_patches_debug_28593 | rasdani/github-patches | git_diff | Kinto__kinto-827 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consistency on creation ? 403 versus 412
If we try to create an object that was already created by someone else, the server returns `403 Unauthorized`.
If we add the header `If-None-Match: *`, it also returns `403` even if I have the permission to create new objects (rather than `412 Precondition Failed`), which can be confusing.
In the documentation we don't make those corner cases very clear.
Is that consistent, by the way? Keep in mind that we want to prevent Alice from learning which object ids Bob has created if she has no permission to read them.
</issue>
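One way to make the corner case concrete is to decide the required permission from the HTTP method, whether the record already exists, and the `If-None-Match` header. The sketch below is illustrative only, not Kinto's code; it assumes that requiring `create` on the plural endpoint lets the precondition check answer `412 Precondition Failed` instead of a blanket `403`:

```python
# Illustrative decision table for the "safe creation" case discussed above.
METHOD_PERMISSIONS = {"head": "read", "get": "read", "post": "create",
                      "delete": "write", "patch": "write"}

def required_permission(method, record_exists, if_none_match=None):
    method = method.lower()
    if method != "put":
        return METHOD_PERMISSIONS[method]
    if not record_exists:
        return "create"            # PUT on a new id: this is a creation
    if if_none_match == "*":
        return "create"            # safe creation: let the 412 check answer
    return "write"                 # overwriting an existing record

print(required_permission("PUT", record_exists=True, if_none_match="*"))  # create
print(required_permission("PUT", record_exists=True))                     # write
```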
<code>
[start of kinto/core/authorization.py]
1 import functools
2
3 from pyramid.settings import aslist
4 from pyramid.security import IAuthorizationPolicy, Authenticated
5 from zope.interface import implementer
6
7 from kinto.core import utils
8 from kinto.core.storage import exceptions as storage_exceptions
9 from kinto.core.authentication import prefixed_userid
10
11 # A permission is called "dynamic" when it's computed at request time.
12 DYNAMIC = 'dynamic'
13
14 # When permission is set to "private", only the current user is allowed.
15 PRIVATE = 'private'
16
17
18 def groupfinder(userid, request):
19 """Fetch principals from permission backend for the specified `userid`.
20
21 This is plugged by default using the ``multiauth.groupfinder`` setting.
22 """
23 backend = getattr(request.registry, 'permission', None)
24 # Permission backend not configured. Ignore.
25 if not backend:
26 return []
27
28 # Safety check when Kinto-Core is used without pyramid_multiauth.
29 if request.prefixed_userid:
30 userid = request.prefixed_userid
31
32 # Query the permission backend only once per request (e.g. batch).
33 reify_key = userid + '_principals'
34 if reify_key not in request.bound_data:
35 principals = backend.get_user_principals(userid)
36 request.bound_data[reify_key] = principals
37
38 return request.bound_data[reify_key]
39
40
41 @implementer(IAuthorizationPolicy)
42 class AuthorizationPolicy(object):
43 """Default authorization class, that leverages the permission backend
44 for shareable resources.
45 """
46
47 get_bound_permissions = None
48 """Callable that takes an object id and a permission and returns
49 a list of tuples (<object id>, <permission>). Useful when objects
50 permission depend on others."""
51
52 def permits(self, context, principals, permission):
53 if permission == PRIVATE:
54 return Authenticated in principals
55
56 # Add prefixed user id to principals.
57 prefixed_userid = context.get_prefixed_userid()
58 if prefixed_userid and ':' in prefixed_userid:
59 principals = principals + [prefixed_userid]
60 prefix, user_id = prefixed_userid.split(':', 1)
61 # Remove unprefixed user id to avoid conflicts.
62 # (it is added via Pyramid Authn policy effective principals)
63 if user_id in principals:
64 principals.remove(user_id)
65 # Retro-compatibility with cliquet 2.0 '_' user id prefixes.
66 # Just in case it was used in permissions definitions.
67 principals.append('%s_%s' % (prefix, user_id))
68
69 if permission == DYNAMIC:
70 permission = context.required_permission
71
72 if permission == 'create':
73 permission = '%s:%s' % (context.resource_name, permission)
74
75 if context.allowed_principals:
76 allowed = bool(set(context.allowed_principals) & set(principals))
77 else:
78 object_id = context.permission_object_id
79 if self.get_bound_permissions is None:
80 bound_perms = [(object_id, permission)]
81 else:
82 bound_perms = self.get_bound_permissions(object_id, permission)
83 allowed = context.check_permission(principals, bound_perms)
84
85 # If not allowed on this collection, but some records are shared with
86 # the current user, then authorize.
87 # The ShareableResource class will take care of the filtering.
88 is_list_operation = (context.on_collection and
89 not permission.endswith('create'))
90 if not allowed and is_list_operation:
91 shared = context.fetch_shared_records(permission,
92 principals,
93 self.get_bound_permissions)
94 allowed = shared is not None
95
96 return allowed
97
98 def principals_allowed_by_permission(self, context, permission):
99 raise NotImplementedError() # PRAGMA NOCOVER
100
101
102 class RouteFactory(object):
103 resource_name = None
104 on_collection = False
105 required_permission = None
106 allowed_principals = None
107 permission_object_id = None
108 current_record = None
109 shared_ids = None
110
111 method_permissions = {
112 "head": "read",
113 "get": "read",
114 "post": "create",
115 "delete": "write",
116 "patch": "write"
117 }
118
119 def __init__(self, request):
120 # Make it available for the authorization policy.
121 self.get_prefixed_userid = functools.partial(prefixed_userid, request)
122
123 # Store some shortcuts.
124 permission = request.registry.permission
125 self.check_permission = permission.check_permission
126 self._get_accessible_objects = permission.get_accessible_objects
127
128 # Store current resource and required permission.
129 service = utils.current_service(request)
130 is_on_resource = (service is not None and
131 hasattr(service, 'viewset') and
132 hasattr(service, 'resource'))
133 if is_on_resource:
134 self.resource_name = request.current_resource_name
135 self.on_collection = getattr(service, "type", None) == "collection"
136
137 self.permission_object_id, self.required_permission = (
138 self._find_required_permission(request, service))
139
140 # To obtain shared records on a collection endpoint, use a match:
141 self._object_id_match = self.get_permission_object_id(request, '*')
142
143 # Check if principals are allowed explicitly from settings.
144 settings = request.registry.settings
145 setting = '%s_%s_principals' % (self.resource_name,
146 self.required_permission)
147 self.allowed_principals = aslist(settings.get(setting, ''))
148
149 def fetch_shared_records(self, perm, principals, get_bound_permissions):
150 """Fetch records that are readable or writable for the current
151 principals.
152
153 See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`
154
155 If no record is shared, it returns None.
156
157 .. warning::
158 This sets the ``shared_ids`` attribute to the context with the
159 return value. The attribute is then read by
160 :class:`kinto.core.resource.ShareableResource`
161 """
162 if get_bound_permissions:
163 bound_perms = get_bound_permissions(self._object_id_match, perm)
164 else:
165 bound_perms = [(self._object_id_match, perm)]
166 by_obj_id = self._get_accessible_objects(principals, bound_perms)
167 ids = by_obj_id.keys()
168 if len(ids) > 0:
169 # Store for later use in ``ShareableResource``.
170 self.shared_ids = [self._extract_object_id(id_) for id_ in ids]
171 else:
172 self.shared_ids = None
173
174 return self.shared_ids
175
176 def get_permission_object_id(self, request, object_id=None):
177 """Returns the permission object id for the current request.
178 In the nominal case, it is just the current URI without version prefix.
179 For collections, it is the related record URI using the specified
180 `object_id`.
181
182 See :meth:`kinto.core.resource.model.SharableModel` and
183 :meth:`kinto.core.authorization.RouteFactory.__init__`
184 """
185 object_uri = utils.strip_uri_prefix(request.path)
186
187 if self.on_collection and object_id is not None:
188 # With the current request on a collection, the record URI must
189 # be found out by inspecting the collection service and its sibling
190 # record service.
191 matchdict = request.matchdict.copy()
192 matchdict['id'] = object_id
193 try:
194 object_uri = utils.instance_uri(request,
195 self.resource_name,
196 **matchdict)
197 if object_id == '*':
198 object_uri = object_uri.replace('%2A', '*')
199 except KeyError:
200 # Maybe the resource has no single record endpoint.
201 # We consider that object URIs in permissions backend will
202 # be stored naively:
203 object_uri = object_uri + '/' + object_id
204
205 return object_uri
206
207 def _extract_object_id(self, object_uri):
208 # XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']
209 return object_uri.split('/')[-1]
210
211 def _find_required_permission(self, request, service):
212 """Find out what is the permission object id and the required
213 permission.
214
215 .. note::
216 This method saves an attribute ``self.current_record`` used
217 in :class:`kinto.core.resource.UserResource`.
218 """
219 # By default, it's a URI a and permission associated to the method.
220 permission_object_id = self.get_permission_object_id(request)
221 method = request.method.lower()
222 required_permission = self.method_permissions.get(method)
223
224 # In the case of a "PUT", check if the targetted record already
225 # exists, return "write" if it does, "create" otherwise.
226 if request.method.lower() == "put":
227 resource = service.resource(request=request, context=self)
228 try:
229 record = resource.model.get_record(resource.record_id)
230 # Save a reference, to avoid refetching from storage in
231 # resource.
232 self.current_record = record
233 except storage_exceptions.RecordNotFoundError:
234 # The record does not exist, the permission to create on
235 # the related collection is required.
236 permission_object_id = service.collection_path.format(
237 **request.matchdict)
238 required_permission = "create"
239 else:
240 required_permission = "write"
241
242 return (permission_object_id, required_permission)
243
[end of kinto/core/authorization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/authorization.py b/kinto/core/authorization.py
--- a/kinto/core/authorization.py
+++ b/kinto/core/authorization.py
@@ -221,6 +221,9 @@
method = request.method.lower()
required_permission = self.method_permissions.get(method)
+ # For create permission, the object id is the plural endpoint.
+ collection_path = service.collection_path.format(**request.matchdict)
+
# In the case of a "PUT", check if the targetted record already
# exists, return "write" if it does, "create" otherwise.
if request.method.lower() == "put":
@@ -233,10 +236,15 @@
except storage_exceptions.RecordNotFoundError:
# The record does not exist, the permission to create on
# the related collection is required.
- permission_object_id = service.collection_path.format(
- **request.matchdict)
+ permission_object_id = collection_path
required_permission = "create"
else:
- required_permission = "write"
+ # For safe creations, the user needs a create permission.
+ # See Kinto/kinto#792
+ if request.headers.get('If-None-Match') == '*':
+ permission_object_id = collection_path
+ required_permission = "create"
+ else:
+ required_permission = "write"
return (permission_object_id, required_permission)
| {"golden_diff": "diff --git a/kinto/core/authorization.py b/kinto/core/authorization.py\n--- a/kinto/core/authorization.py\n+++ b/kinto/core/authorization.py\n@@ -221,6 +221,9 @@\n method = request.method.lower()\n required_permission = self.method_permissions.get(method)\n \n+ # For create permission, the object id is the plural endpoint.\n+ collection_path = service.collection_path.format(**request.matchdict)\n+\n # In the case of a \"PUT\", check if the targetted record already\n # exists, return \"write\" if it does, \"create\" otherwise.\n if request.method.lower() == \"put\":\n@@ -233,10 +236,15 @@\n except storage_exceptions.RecordNotFoundError:\n # The record does not exist, the permission to create on\n # the related collection is required.\n- permission_object_id = service.collection_path.format(\n- **request.matchdict)\n+ permission_object_id = collection_path\n required_permission = \"create\"\n else:\n- required_permission = \"write\"\n+ # For safe creations, the user needs a create permission.\n+ # See Kinto/kinto#792\n+ if request.headers.get('If-None-Match') == '*':\n+ permission_object_id = collection_path\n+ required_permission = \"create\"\n+ else:\n+ required_permission = \"write\"\n \n return (permission_object_id, required_permission)\n", "issue": "Consistency on creation ? 403 versus 412\nIf we try to create an object that was already created by someone else, the server returns `403 Unauthorized`.\n\nIf we add the header `If-None-Match: *`, it also returns `403` even I have the permission to create new objects (and not `412 Precondition failed`), which can be confusing.\nIn the documentation we don't make those corner cases very clear.\n\nIs that consistent by the way ? Knowing that we want to prevent Alice to know what objects ids Bob has created if she has no permission to read them.\n\nConsistency on creation ? 403 versus 412\nIf we try to create an object that was already created by someone else, the server returns `403 Unauthorized`.\n\nIf we add the header `If-None-Match: *`, it also returns `403` even I have the permission to create new objects (and not `412 Precondition failed`), which can be confusing.\nIn the documentation we don't make those corner cases very clear.\n\nIs that consistent by the way ? Knowing that we want to prevent Alice to know what objects ids Bob has created if she has no permission to read them.\n\n", "before_files": [{"content": "import functools\n\nfrom pyramid.settings import aslist\nfrom pyramid.security import IAuthorizationPolicy, Authenticated\nfrom zope.interface import implementer\n\nfrom kinto.core import utils\nfrom kinto.core.storage import exceptions as storage_exceptions\nfrom kinto.core.authentication import prefixed_userid\n\n# A permission is called \"dynamic\" when it's computed at request time.\nDYNAMIC = 'dynamic'\n\n# When permission is set to \"private\", only the current user is allowed.\nPRIVATE = 'private'\n\n\ndef groupfinder(userid, request):\n \"\"\"Fetch principals from permission backend for the specified `userid`.\n\n This is plugged by default using the ``multiauth.groupfinder`` setting.\n \"\"\"\n backend = getattr(request.registry, 'permission', None)\n # Permission backend not configured. Ignore.\n if not backend:\n return []\n\n # Safety check when Kinto-Core is used without pyramid_multiauth.\n if request.prefixed_userid:\n userid = request.prefixed_userid\n\n # Query the permission backend only once per request (e.g. 
batch).\n reify_key = userid + '_principals'\n if reify_key not in request.bound_data:\n principals = backend.get_user_principals(userid)\n request.bound_data[reify_key] = principals\n\n return request.bound_data[reify_key]\n\n\n@implementer(IAuthorizationPolicy)\nclass AuthorizationPolicy(object):\n \"\"\"Default authorization class, that leverages the permission backend\n for shareable resources.\n \"\"\"\n\n get_bound_permissions = None\n \"\"\"Callable that takes an object id and a permission and returns\n a list of tuples (<object id>, <permission>). Useful when objects\n permission depend on others.\"\"\"\n\n def permits(self, context, principals, permission):\n if permission == PRIVATE:\n return Authenticated in principals\n\n # Add prefixed user id to principals.\n prefixed_userid = context.get_prefixed_userid()\n if prefixed_userid and ':' in prefixed_userid:\n principals = principals + [prefixed_userid]\n prefix, user_id = prefixed_userid.split(':', 1)\n # Remove unprefixed user id to avoid conflicts.\n # (it is added via Pyramid Authn policy effective principals)\n if user_id in principals:\n principals.remove(user_id)\n # Retro-compatibility with cliquet 2.0 '_' user id prefixes.\n # Just in case it was used in permissions definitions.\n principals.append('%s_%s' % (prefix, user_id))\n\n if permission == DYNAMIC:\n permission = context.required_permission\n\n if permission == 'create':\n permission = '%s:%s' % (context.resource_name, permission)\n\n if context.allowed_principals:\n allowed = bool(set(context.allowed_principals) & set(principals))\n else:\n object_id = context.permission_object_id\n if self.get_bound_permissions is None:\n bound_perms = [(object_id, permission)]\n else:\n bound_perms = self.get_bound_permissions(object_id, permission)\n allowed = context.check_permission(principals, bound_perms)\n\n # If not allowed on this collection, but some records are shared with\n # the current user, then authorize.\n # The ShareableResource class will take care of the filtering.\n is_list_operation = (context.on_collection and\n not permission.endswith('create'))\n if not allowed and is_list_operation:\n shared = context.fetch_shared_records(permission,\n principals,\n self.get_bound_permissions)\n allowed = shared is not None\n\n return allowed\n\n def principals_allowed_by_permission(self, context, permission):\n raise NotImplementedError() # PRAGMA NOCOVER\n\n\nclass RouteFactory(object):\n resource_name = None\n on_collection = False\n required_permission = None\n allowed_principals = None\n permission_object_id = None\n current_record = None\n shared_ids = None\n\n method_permissions = {\n \"head\": \"read\",\n \"get\": \"read\",\n \"post\": \"create\",\n \"delete\": \"write\",\n \"patch\": \"write\"\n }\n\n def __init__(self, request):\n # Make it available for the authorization policy.\n self.get_prefixed_userid = functools.partial(prefixed_userid, request)\n\n # Store some shortcuts.\n permission = request.registry.permission\n self.check_permission = permission.check_permission\n self._get_accessible_objects = permission.get_accessible_objects\n\n # Store current resource and required permission.\n service = utils.current_service(request)\n is_on_resource = (service is not None and\n hasattr(service, 'viewset') and\n hasattr(service, 'resource'))\n if is_on_resource:\n self.resource_name = request.current_resource_name\n self.on_collection = getattr(service, \"type\", None) == \"collection\"\n\n self.permission_object_id, self.required_permission = (\n 
self._find_required_permission(request, service))\n\n # To obtain shared records on a collection endpoint, use a match:\n self._object_id_match = self.get_permission_object_id(request, '*')\n\n # Check if principals are allowed explicitly from settings.\n settings = request.registry.settings\n setting = '%s_%s_principals' % (self.resource_name,\n self.required_permission)\n self.allowed_principals = aslist(settings.get(setting, ''))\n\n def fetch_shared_records(self, perm, principals, get_bound_permissions):\n \"\"\"Fetch records that are readable or writable for the current\n principals.\n\n See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`\n\n If no record is shared, it returns None.\n\n .. warning::\n This sets the ``shared_ids`` attribute to the context with the\n return value. The attribute is then read by\n :class:`kinto.core.resource.ShareableResource`\n \"\"\"\n if get_bound_permissions:\n bound_perms = get_bound_permissions(self._object_id_match, perm)\n else:\n bound_perms = [(self._object_id_match, perm)]\n by_obj_id = self._get_accessible_objects(principals, bound_perms)\n ids = by_obj_id.keys()\n if len(ids) > 0:\n # Store for later use in ``ShareableResource``.\n self.shared_ids = [self._extract_object_id(id_) for id_ in ids]\n else:\n self.shared_ids = None\n\n return self.shared_ids\n\n def get_permission_object_id(self, request, object_id=None):\n \"\"\"Returns the permission object id for the current request.\n In the nominal case, it is just the current URI without version prefix.\n For collections, it is the related record URI using the specified\n `object_id`.\n\n See :meth:`kinto.core.resource.model.SharableModel` and\n :meth:`kinto.core.authorization.RouteFactory.__init__`\n \"\"\"\n object_uri = utils.strip_uri_prefix(request.path)\n\n if self.on_collection and object_id is not None:\n # With the current request on a collection, the record URI must\n # be found out by inspecting the collection service and its sibling\n # record service.\n matchdict = request.matchdict.copy()\n matchdict['id'] = object_id\n try:\n object_uri = utils.instance_uri(request,\n self.resource_name,\n **matchdict)\n if object_id == '*':\n object_uri = object_uri.replace('%2A', '*')\n except KeyError:\n # Maybe the resource has no single record endpoint.\n # We consider that object URIs in permissions backend will\n # be stored naively:\n object_uri = object_uri + '/' + object_id\n\n return object_uri\n\n def _extract_object_id(self, object_uri):\n # XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']\n return object_uri.split('/')[-1]\n\n def _find_required_permission(self, request, service):\n \"\"\"Find out what is the permission object id and the required\n permission.\n\n .. 
note::\n This method saves an attribute ``self.current_record`` used\n in :class:`kinto.core.resource.UserResource`.\n \"\"\"\n # By default, it's a URI a and permission associated to the method.\n permission_object_id = self.get_permission_object_id(request)\n method = request.method.lower()\n required_permission = self.method_permissions.get(method)\n\n # In the case of a \"PUT\", check if the targetted record already\n # exists, return \"write\" if it does, \"create\" otherwise.\n if request.method.lower() == \"put\":\n resource = service.resource(request=request, context=self)\n try:\n record = resource.model.get_record(resource.record_id)\n # Save a reference, to avoid refetching from storage in\n # resource.\n self.current_record = record\n except storage_exceptions.RecordNotFoundError:\n # The record does not exist, the permission to create on\n # the related collection is required.\n permission_object_id = service.collection_path.format(\n **request.matchdict)\n required_permission = \"create\"\n else:\n required_permission = \"write\"\n\n return (permission_object_id, required_permission)\n", "path": "kinto/core/authorization.py"}]} | 3,358 | 316 |
gh_patches_debug_30023 | rasdani/github-patches | git_diff | scalableminds__webknossos-libs-1083 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Webknossos CLI download annotation
## Context
- Affected library: webknossos
When using the webknossos CLI to download an annotation, the download command is overloaded to handle both dataset downloads and annotation downloads. An annotation should be downloaded when no dataset is found, and that fallback is triggered by an AssertionError. Currently no AssertionError is raised; an UnexpectedStatusError is raised instead.
## Expected Behavior
When a URL to an Annotation is given, the Annotation should be downloaded.
</issue>
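A sketch of one possible direction for the fix: dispatch on the shape of the URL rather than relying on an `AssertionError` escaping from the dataset download path. The regular expressions below are placeholders for illustration, not the patterns webknossos actually uses:

```python
import re

# Placeholder patterns -- the real library keeps its own, stricter regexes.
DATASET_URL = re.compile(r"/datasets/[^/]+/[^/]+")
ANNOTATION_URL = re.compile(r"/annotations/[^/]+")

def classify(url: str) -> str:
    if DATASET_URL.search(url):
        return "dataset"
    if ANNOTATION_URL.search(url):
        return "annotation"
    raise RuntimeError("The provided URL does not lead to a dataset or annotation.")

print(classify("https://example.org/datasets/sample_org/sample_dataset"))  # dataset
print(classify("https://example.org/annotations/1234567890abcdef"))        # annotation
```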
<code>
[start of webknossos/webknossos/cli/download.py]
1 """This module takes care of downloading WEBKNOSSOS datasets."""
2
3 from typing import Any, List, Optional
4
5 import typer
6 from typing_extensions import Annotated
7
8 from ..annotation import Annotation
9 from ..client import webknossos_context
10 from ..dataset import Dataset
11 from ..geometry import BoundingBox, Mag
12 from ._utils import parse_bbox, parse_mag, parse_path
13
14
15 def main(
16 *,
17 target: Annotated[
18 Any,
19 typer.Argument(
20 show_default=False,
21 help="Path to save your WEBKNOSSOS dataset.",
22 parser=parse_path,
23 ),
24 ],
25 url: Annotated[
26 str,
27 typer.Option(
28 help="URL of your dataset or your annotation.",
29 ),
30 ],
31 token: Annotated[
32 Optional[str],
33 typer.Option(
34 help="Authentication token for WEBKNOSSOS instance "
35 "(https://webknossos.org/auth/token).",
36 rich_help_panel="WEBKNOSSOS context",
37 envvar="WK_TOKEN",
38 ),
39 ] = None,
40 bbox: Annotated[
41 Optional[BoundingBox],
42 typer.Option(
43 rich_help_panel="Partial download",
44 help="Bounding box that should be downloaded. "
45 "The input format is x,y,z,width,height,depth. "
46 "Should be a comma separated string (e.g. 0,0,0,10,10,10).",
47 parser=parse_bbox,
48 metavar="BBOX",
49 ),
50 ] = None,
51 layer: Annotated[
52 Optional[List[str]],
53 typer.Option(
54 rich_help_panel="Partial download",
55 help="Layers that should be downloaded. "
56 "For multiple layers type: --layer color --layer segmentation",
57 ),
58 ] = None,
59 mag: Annotated[
60 Optional[List[Mag]],
61 typer.Option(
62 rich_help_panel="Partial download",
63 help="Mags that should be downloaded. "
64 "Should be number or minus separated string (e.g. 2 or 2-2-2). "
65 "For multiple mags type: --mag 1 --mag 2",
66 parser=parse_mag,
67 metavar="MAG",
68 ),
69 ] = None,
70 ) -> None:
71 """Download a dataset from a WEBKNOSSOS server."""
72
73 layers = layer if layer else None
74 mags = mag if mag else None
75
76 with webknossos_context(token=token):
77 try:
78 Dataset.download(
79 dataset_name_or_url=url,
80 path=target,
81 bbox=bbox,
82 layers=layers,
83 mags=mags,
84 )
85 except AssertionError:
86 Annotation.download(annotation_id_or_url=url).save(target)
87
[end of webknossos/webknossos/cli/download.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/webknossos/webknossos/cli/download.py b/webknossos/webknossos/cli/download.py
--- a/webknossos/webknossos/cli/download.py
+++ b/webknossos/webknossos/cli/download.py
@@ -1,13 +1,15 @@
"""This module takes care of downloading WEBKNOSSOS datasets."""
+import re
from typing import Any, List, Optional
import typer
from typing_extensions import Annotated
-from ..annotation import Annotation
+from ..annotation.annotation import _ANNOTATION_URL_REGEX, Annotation
from ..client import webknossos_context
-from ..dataset import Dataset
+from ..client._resolve_short_link import resolve_short_link
+from ..dataset.dataset import _DATASET_URL_REGEX, Dataset
from ..geometry import BoundingBox, Mag
from ._utils import parse_bbox, parse_mag, parse_path
@@ -72,9 +74,10 @@
layers = layer if layer else None
mags = mag if mag else None
+ url = resolve_short_link(url)
with webknossos_context(token=token):
- try:
+ if re.match(_DATASET_URL_REGEX, url):
Dataset.download(
dataset_name_or_url=url,
path=target,
@@ -82,5 +85,13 @@
layers=layers,
mags=mags,
)
- except AssertionError:
+ elif re.match(_ANNOTATION_URL_REGEX, url):
Annotation.download(annotation_id_or_url=url).save(target)
+ else:
+ raise RuntimeError(
+ "The provided URL does not lead to a dataset or annotation."
+ )
+
+
+if __name__ == "__main__":
+ typer.run(main)
| {"golden_diff": "diff --git a/webknossos/webknossos/cli/download.py b/webknossos/webknossos/cli/download.py\n--- a/webknossos/webknossos/cli/download.py\n+++ b/webknossos/webknossos/cli/download.py\n@@ -1,13 +1,15 @@\n \"\"\"This module takes care of downloading WEBKNOSSOS datasets.\"\"\"\n \n+import re\n from typing import Any, List, Optional\n \n import typer\n from typing_extensions import Annotated\n \n-from ..annotation import Annotation\n+from ..annotation.annotation import _ANNOTATION_URL_REGEX, Annotation\n from ..client import webknossos_context\n-from ..dataset import Dataset\n+from ..client._resolve_short_link import resolve_short_link\n+from ..dataset.dataset import _DATASET_URL_REGEX, Dataset\n from ..geometry import BoundingBox, Mag\n from ._utils import parse_bbox, parse_mag, parse_path\n \n@@ -72,9 +74,10 @@\n \n layers = layer if layer else None\n mags = mag if mag else None\n+ url = resolve_short_link(url)\n \n with webknossos_context(token=token):\n- try:\n+ if re.match(_DATASET_URL_REGEX, url):\n Dataset.download(\n dataset_name_or_url=url,\n path=target,\n@@ -82,5 +85,13 @@\n layers=layers,\n mags=mags,\n )\n- except AssertionError:\n+ elif re.match(_ANNOTATION_URL_REGEX, url):\n Annotation.download(annotation_id_or_url=url).save(target)\n+ else:\n+ raise RuntimeError(\n+ \"The provided URL does not lead to a dataset or annotation.\"\n+ )\n+\n+\n+if __name__ == \"__main__\":\n+ typer.run(main)\n", "issue": "Webknossos CLI download annotation\n## Context\r\n- Affected library: webknossos\r\nWhen using the webknossos CLI to download an annotation, the download command is overloaded to take care of dataset downloads and annotation downloads. An annotation should be downloaded when no dataset is found. This is triggered by an AssertionError. Currently no assertion is thrown. Instead, an UnexpectedStatusError is raised.\r\n\r\n## Expected Behavior\r\nWhen a URL to an Annotation is given, the Annotation should be downloaded.\r\n\r\n\n", "before_files": [{"content": "\"\"\"This module takes care of downloading WEBKNOSSOS datasets.\"\"\"\n\nfrom typing import Any, List, Optional\n\nimport typer\nfrom typing_extensions import Annotated\n\nfrom ..annotation import Annotation\nfrom ..client import webknossos_context\nfrom ..dataset import Dataset\nfrom ..geometry import BoundingBox, Mag\nfrom ._utils import parse_bbox, parse_mag, parse_path\n\n\ndef main(\n *,\n target: Annotated[\n Any,\n typer.Argument(\n show_default=False,\n help=\"Path to save your WEBKNOSSOS dataset.\",\n parser=parse_path,\n ),\n ],\n url: Annotated[\n str,\n typer.Option(\n help=\"URL of your dataset or your annotation.\",\n ),\n ],\n token: Annotated[\n Optional[str],\n typer.Option(\n help=\"Authentication token for WEBKNOSSOS instance \"\n \"(https://webknossos.org/auth/token).\",\n rich_help_panel=\"WEBKNOSSOS context\",\n envvar=\"WK_TOKEN\",\n ),\n ] = None,\n bbox: Annotated[\n Optional[BoundingBox],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Bounding box that should be downloaded. \"\n \"The input format is x,y,z,width,height,depth. \"\n \"Should be a comma separated string (e.g. 0,0,0,10,10,10).\",\n parser=parse_bbox,\n metavar=\"BBOX\",\n ),\n ] = None,\n layer: Annotated[\n Optional[List[str]],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Layers that should be downloaded. 
\"\n \"For multiple layers type: --layer color --layer segmentation\",\n ),\n ] = None,\n mag: Annotated[\n Optional[List[Mag]],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Mags that should be downloaded. \"\n \"Should be number or minus separated string (e.g. 2 or 2-2-2). \"\n \"For multiple mags type: --mag 1 --mag 2\",\n parser=parse_mag,\n metavar=\"MAG\",\n ),\n ] = None,\n) -> None:\n \"\"\"Download a dataset from a WEBKNOSSOS server.\"\"\"\n\n layers = layer if layer else None\n mags = mag if mag else None\n\n with webknossos_context(token=token):\n try:\n Dataset.download(\n dataset_name_or_url=url,\n path=target,\n bbox=bbox,\n layers=layers,\n mags=mags,\n )\n except AssertionError:\n Annotation.download(annotation_id_or_url=url).save(target)\n", "path": "webknossos/webknossos/cli/download.py"}]} | 1,393 | 379 |
gh_patches_debug_5052 | rasdani/github-patches | git_diff | TheAlgorithms__Python-8766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
I found a typo in graphs/greedy_best_first
### What would you like to share?
The dx and dy are reversed!
https://github.com/TheAlgorithms/Python/blob/ce43a8ac4ad14e1639014d374b1137906218cfe3/graphs/greedy_best_first.py#L61-L63
Expected correction
```python
dx = abs(self.pos_x - self.goal_x)
dy = abs(self.pos_y - self.goal_y)
return dx + dy
```
### Additional information
_No response_
</issue>
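Worth noting: because the function only returns `dx + dy`, swapping the two names does not change the returned value, so the fix is about readability rather than behaviour. A quick check of the corrected naming against the class's own doctest values:

```python
def manhattan(pos_x, pos_y, goal_x, goal_y):
    dx = abs(pos_x - goal_x)   # difference along x
    dy = abs(pos_y - goal_y)   # difference along y
    return dx + dy

print(manhattan(0, 0, 4, 5))  # 9, matches the Node(0, 0, 4, 5, ...) doctest
print(manhattan(1, 4, 3, 4))  # 2, matches the Node(1, 4, 3, 4, ...) doctest
```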
<code>
[start of graphs/greedy_best_first.py]
1 """
2 https://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS
3 """
4
5 from __future__ import annotations
6
7 Path = list[tuple[int, int]]
8
9 grid = [
10 [0, 0, 0, 0, 0, 0, 0],
11 [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
12 [0, 0, 0, 0, 0, 0, 0],
13 [0, 0, 1, 0, 0, 0, 0],
14 [1, 0, 1, 0, 0, 0, 0],
15 [0, 0, 0, 0, 0, 0, 0],
16 [0, 0, 0, 0, 1, 0, 0],
17 ]
18
19 delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
20
21
22 class Node:
23 """
24 >>> k = Node(0, 0, 4, 5, 0, None)
25 >>> k.calculate_heuristic()
26 9
27 >>> n = Node(1, 4, 3, 4, 2, None)
28 >>> n.calculate_heuristic()
29 2
30 >>> l = [k, n]
31 >>> n == l[0]
32 False
33 >>> l.sort()
34 >>> n == l[0]
35 True
36 """
37
38 def __init__(
39 self,
40 pos_x: int,
41 pos_y: int,
42 goal_x: int,
43 goal_y: int,
44 g_cost: float,
45 parent: Node | None,
46 ):
47 self.pos_x = pos_x
48 self.pos_y = pos_y
49 self.pos = (pos_y, pos_x)
50 self.goal_x = goal_x
51 self.goal_y = goal_y
52 self.g_cost = g_cost
53 self.parent = parent
54 self.f_cost = self.calculate_heuristic()
55
56 def calculate_heuristic(self) -> float:
57 """
58 The heuristic here is the Manhattan Distance
59 Could elaborate to offer more than one choice
60 """
61 dy = abs(self.pos_x - self.goal_x)
62 dx = abs(self.pos_y - self.goal_y)
63 return dx + dy
64
65 def __lt__(self, other) -> bool:
66 return self.f_cost < other.f_cost
67
68
69 class GreedyBestFirst:
70 """
71 >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))
72 >>> [x.pos for x in gbf.get_successors(gbf.start)]
73 [(1, 0), (0, 1)]
74 >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])
75 (0, 1)
76 >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])
77 (1, 0)
78 >>> gbf.retrace_path(gbf.start)
79 [(0, 0)]
80 >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE
81 [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1),
82 (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
83 """
84
85 def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
86 self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
87 self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
88
89 self.open_nodes = [self.start]
90 self.closed_nodes: list[Node] = []
91
92 self.reached = False
93
94 def search(self) -> Path | None:
95 """
96 Search for the path,
97 if a path is not found, only the starting position is returned
98 """
99 while self.open_nodes:
100 # Open Nodes are sorted using __lt__
101 self.open_nodes.sort()
102 current_node = self.open_nodes.pop(0)
103
104 if current_node.pos == self.target.pos:
105 self.reached = True
106 return self.retrace_path(current_node)
107
108 self.closed_nodes.append(current_node)
109 successors = self.get_successors(current_node)
110
111 for child_node in successors:
112 if child_node in self.closed_nodes:
113 continue
114
115 if child_node not in self.open_nodes:
116 self.open_nodes.append(child_node)
117 else:
118 # retrieve the best current path
119 better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
120
121 if child_node.g_cost < better_node.g_cost:
122 self.open_nodes.append(child_node)
123 else:
124 self.open_nodes.append(better_node)
125
126 if not self.reached:
127 return [self.start.pos]
128 return None
129
130 def get_successors(self, parent: Node) -> list[Node]:
131 """
132 Returns a list of successors (both in the grid and free spaces)
133 """
134 successors = []
135 for action in delta:
136 pos_x = parent.pos_x + action[1]
137 pos_y = parent.pos_y + action[0]
138
139 if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
140 continue
141
142 if grid[pos_y][pos_x] != 0:
143 continue
144
145 successors.append(
146 Node(
147 pos_x,
148 pos_y,
149 self.target.pos_y,
150 self.target.pos_x,
151 parent.g_cost + 1,
152 parent,
153 )
154 )
155 return successors
156
157 def retrace_path(self, node: Node | None) -> Path:
158 """
159 Retrace the path from parents to parents until start node
160 """
161 current_node = node
162 path = []
163 while current_node is not None:
164 path.append((current_node.pos_y, current_node.pos_x))
165 current_node = current_node.parent
166 path.reverse()
167 return path
168
169
170 if __name__ == "__main__":
171 init = (0, 0)
172 goal = (len(grid) - 1, len(grid[0]) - 1)
173 for elem in grid:
174 print(elem)
175
176 print("------")
177
178 greedy_bf = GreedyBestFirst(init, goal)
179 path = greedy_bf.search()
180 if path:
181 for pos_x, pos_y in path:
182 grid[pos_x][pos_y] = 2
183
184 for elem in grid:
185 print(elem)
186
[end of graphs/greedy_best_first.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py
--- a/graphs/greedy_best_first.py
+++ b/graphs/greedy_best_first.py
@@ -58,8 +58,8 @@
The heuristic here is the Manhattan Distance
Could elaborate to offer more than one choice
"""
- dy = abs(self.pos_x - self.goal_x)
- dx = abs(self.pos_y - self.goal_y)
+ dx = abs(self.pos_x - self.goal_x)
+ dy = abs(self.pos_y - self.goal_y)
return dx + dy
def __lt__(self, other) -> bool:
| {"golden_diff": "diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py\n--- a/graphs/greedy_best_first.py\n+++ b/graphs/greedy_best_first.py\n@@ -58,8 +58,8 @@\n The heuristic here is the Manhattan Distance\n Could elaborate to offer more than one choice\n \"\"\"\n- dy = abs(self.pos_x - self.goal_x)\n- dx = abs(self.pos_y - self.goal_y)\n+ dx = abs(self.pos_x - self.goal_x)\n+ dy = abs(self.pos_y - self.goal_y)\n return dx + dy\n \n def __lt__(self, other) -> bool:\n", "issue": "I found typo in graphs/greedy_best_first\n### What would you like to share?\n\nThe dx and dy are reversed!\r\nhttps://github.com/TheAlgorithms/Python/blob/ce43a8ac4ad14e1639014d374b1137906218cfe3/graphs/greedy_best_first.py#L61-L63\r\n\r\nExpected correction\r\n```python\r\ndx = abs(self.pos_x - self.goal_x)\r\ndy = abs(self.pos_y - self.goal_y)\r\nreturn dx + dy\r\n```\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nhttps://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS\n\"\"\"\n\nfrom __future__ import annotations\n\nPath = list[tuple[int, int]]\n\ngrid = [\n [0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n]\n\ndelta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right\n\n\nclass Node:\n \"\"\"\n >>> k = Node(0, 0, 4, 5, 0, None)\n >>> k.calculate_heuristic()\n 9\n >>> n = Node(1, 4, 3, 4, 2, None)\n >>> n.calculate_heuristic()\n 2\n >>> l = [k, n]\n >>> n == l[0]\n False\n >>> l.sort()\n >>> n == l[0]\n True\n \"\"\"\n\n def __init__(\n self,\n pos_x: int,\n pos_y: int,\n goal_x: int,\n goal_y: int,\n g_cost: float,\n parent: Node | None,\n ):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.pos = (pos_y, pos_x)\n self.goal_x = goal_x\n self.goal_y = goal_y\n self.g_cost = g_cost\n self.parent = parent\n self.f_cost = self.calculate_heuristic()\n\n def calculate_heuristic(self) -> float:\n \"\"\"\n The heuristic here is the Manhattan Distance\n Could elaborate to offer more than one choice\n \"\"\"\n dy = abs(self.pos_x - self.goal_x)\n dx = abs(self.pos_y - self.goal_y)\n return dx + dy\n\n def __lt__(self, other) -> bool:\n return self.f_cost < other.f_cost\n\n\nclass GreedyBestFirst:\n \"\"\"\n >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))\n >>> [x.pos for x in gbf.get_successors(gbf.start)]\n [(1, 0), (0, 1)]\n >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])\n (0, 1)\n >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])\n (1, 0)\n >>> gbf.retrace_path(gbf.start)\n [(0, 0)]\n >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE\n [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1),\n (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]\n \"\"\"\n\n def __init__(self, start: tuple[int, int], goal: tuple[int, int]):\n self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)\n self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)\n\n self.open_nodes = [self.start]\n self.closed_nodes: list[Node] = []\n\n self.reached = False\n\n def search(self) -> Path | None:\n \"\"\"\n Search for the path,\n if a path is not found, only the starting position is returned\n \"\"\"\n while self.open_nodes:\n # Open Nodes are sorted using __lt__\n self.open_nodes.sort()\n current_node = self.open_nodes.pop(0)\n\n if current_node.pos == self.target.pos:\n self.reached = True\n return 
self.retrace_path(current_node)\n\n self.closed_nodes.append(current_node)\n successors = self.get_successors(current_node)\n\n for child_node in successors:\n if child_node in self.closed_nodes:\n continue\n\n if child_node not in self.open_nodes:\n self.open_nodes.append(child_node)\n else:\n # retrieve the best current path\n better_node = self.open_nodes.pop(self.open_nodes.index(child_node))\n\n if child_node.g_cost < better_node.g_cost:\n self.open_nodes.append(child_node)\n else:\n self.open_nodes.append(better_node)\n\n if not self.reached:\n return [self.start.pos]\n return None\n\n def get_successors(self, parent: Node) -> list[Node]:\n \"\"\"\n Returns a list of successors (both in the grid and free spaces)\n \"\"\"\n successors = []\n for action in delta:\n pos_x = parent.pos_x + action[1]\n pos_y = parent.pos_y + action[0]\n\n if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):\n continue\n\n if grid[pos_y][pos_x] != 0:\n continue\n\n successors.append(\n Node(\n pos_x,\n pos_y,\n self.target.pos_y,\n self.target.pos_x,\n parent.g_cost + 1,\n parent,\n )\n )\n return successors\n\n def retrace_path(self, node: Node | None) -> Path:\n \"\"\"\n Retrace the path from parents to parents until start node\n \"\"\"\n current_node = node\n path = []\n while current_node is not None:\n path.append((current_node.pos_y, current_node.pos_x))\n current_node = current_node.parent\n path.reverse()\n return path\n\n\nif __name__ == \"__main__\":\n init = (0, 0)\n goal = (len(grid) - 1, len(grid[0]) - 1)\n for elem in grid:\n print(elem)\n\n print(\"------\")\n\n greedy_bf = GreedyBestFirst(init, goal)\n path = greedy_bf.search()\n if path:\n for pos_x, pos_y in path:\n grid[pos_x][pos_y] = 2\n\n for elem in grid:\n print(elem)\n", "path": "graphs/greedy_best_first.py"}]} | 2,654 | 147 |
gh_patches_debug_37238 | rasdani/github-patches | git_diff | conan-io__conan-16103 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feature] show uploaded files size in `conan upload`
It would be very convenient to show the size of the files that will be uploaded to a specific remote via `conan upload ...` for diagnostic purposes: artifacts can be very large and CI systems may refuse to upload them, see e.g. [this StackOverflow question](https://stackoverflow.com/questions/64329087/gitlab-self-hosted-error-uploading-artifacts-as-archive-to-coordinator-to) as an example of a related CI error. With this change, CI administrators could adjust the file size limit on CI without trial-and-error changes to the CI configuration.
- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
</issue>
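
The gist of the request is to sum the on-disk size of every file staged for upload and print it in human-readable units next to the upload message. Below is a minimal, self-contained sketch of that idea; the helper names `human_readable` and `total_upload_size` are illustrative only and are not part of Conan's API.

```python
import os


def human_readable(num_bytes: int) -> str:
    # Walk up the binary unit ladder until the value fits.
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if num_bytes < 1024 or unit == "TB":
            return f"{num_bytes:.1f}{unit}"
        num_bytes /= 1024


def total_upload_size(paths) -> str:
    # Sum the size reported by the filesystem for each file to be uploaded.
    total = sum(os.stat(path).st_size for path in paths)
    return human_readable(total)


if __name__ == "__main__":
    # Example: report the size of the regular files in the current directory.
    files = [p for p in os.listdir(".") if os.path.isfile(p)]
    print(f"Would upload {len(files)} files ({total_upload_size(files)})")
```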
<code>
[start of conans/client/cmd/uploader.py]
1 import os
2 import shutil
3 import time
4
5 from conan.internal.conan_app import ConanApp
6 from conan.api.output import ConanOutput
7 from conans.client.source import retrieve_exports_sources
8 from conans.errors import ConanException, NotFoundException
9 from conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,
10 EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)
11 from conans.util.files import (clean_dirty, is_dirty, gather_files,
12 gzopen_without_timestamps, set_dirty_context_manager, mkdir)
13
14 UPLOAD_POLICY_FORCE = "force-upload"
15 UPLOAD_POLICY_SKIP = "skip-upload"
16
17
18 class UploadUpstreamChecker:
19 """ decides if something needs to be uploaded or force-uploaded checking if that exact
20 revision already exists in the remote server, or if the --force parameter is forcing the upload
21 This is completely irrespective of the actual package contents, it only uses the local
22 computed revision and the remote one
23 """
24 def __init__(self, app: ConanApp):
25 self._app = app
26
27 def check(self, upload_bundle, remote, force):
28 for ref, recipe_bundle in upload_bundle.refs().items():
29 self._check_upstream_recipe(ref, recipe_bundle, remote, force)
30 for pref, prev_bundle in upload_bundle.prefs(ref, recipe_bundle).items():
31 self._check_upstream_package(pref, prev_bundle, remote, force)
32
33 def _check_upstream_recipe(self, ref, ref_bundle, remote, force):
34 output = ConanOutput(scope=str(ref))
35 output.info("Checking which revisions exist in the remote server")
36 try:
37 assert ref.revision
38 # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence
39 server_ref = self._app.remote_manager.get_recipe_revision_reference(ref, remote)
40 assert server_ref # If successful (not raising NotFoundException), this will exist
41 except NotFoundException:
42 ref_bundle["force_upload"] = False
43 ref_bundle["upload"] = True
44 else:
45 if force:
46 output.info(f"Recipe '{ref.repr_notime()}' already in server, forcing upload")
47 ref_bundle["force_upload"] = True
48 ref_bundle["upload"] = True
49 else:
50 output.info(f"Recipe '{ref.repr_notime()}' already in server, skipping upload")
51 ref_bundle["upload"] = False
52 ref_bundle["force_upload"] = False
53
54 def _check_upstream_package(self, pref, prev_bundle, remote, force):
55 assert (pref.revision is not None), "Cannot upload a package without PREV"
56 assert (pref.ref.revision is not None), "Cannot upload a package without RREV"
57
58 try:
59 # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence
60 server_revisions = self._app.remote_manager.get_package_revision_reference(pref, remote)
61 assert server_revisions
62 except NotFoundException:
63 prev_bundle["force_upload"] = False
64 prev_bundle["upload"] = True
65 else:
66 output = ConanOutput(scope=str(pref.ref))
67 if force:
68 output.info(f"Package '{pref.repr_notime()}' already in server, forcing upload")
69 prev_bundle["force_upload"] = True
70 prev_bundle["upload"] = True
71 else:
72 output.info(f"Package '{pref.repr_notime()}' already in server, skipping upload")
73 prev_bundle["force_upload"] = False
74 prev_bundle["upload"] = False
75
76
77 class PackagePreparator:
78 def __init__(self, app: ConanApp, global_conf):
79 self._app = app
80 self._global_conf = global_conf
81
82 def prepare(self, upload_bundle, enabled_remotes):
83 for ref, bundle in upload_bundle.refs().items():
84 layout = self._app.cache.recipe_layout(ref)
85 conanfile_path = layout.conanfile()
86 conanfile = self._app.loader.load_basic(conanfile_path)
87
88 if bundle.get("upload"):
89 self._prepare_recipe(ref, bundle, conanfile, enabled_remotes)
90 for pref, prev_bundle in upload_bundle.prefs(ref, bundle).items():
91 if prev_bundle.get("upload"):
92 self._prepare_package(pref, prev_bundle)
93
94 def _prepare_recipe(self, ref, ref_bundle, conanfile, remotes):
95 """ do a bunch of things that are necessary before actually executing the upload:
96 - retrieve exports_sources to complete the recipe if necessary
97 - compress the artifacts in conan_export.tgz and conan_export_sources.tgz
98 """
99 try:
100 recipe_layout = self._app.cache.recipe_layout(ref)
101 retrieve_exports_sources(self._app.remote_manager, recipe_layout, conanfile, ref,
102 remotes)
103 cache_files = self._compress_recipe_files(recipe_layout, ref)
104 ref_bundle["files"] = cache_files
105 except Exception as e:
106 raise ConanException(f"{ref} Error while compressing: {e}")
107
108 def _compress_recipe_files(self, layout, ref):
109 download_export_folder = layout.download_export()
110
111 output = ConanOutput(scope=str(ref))
112 for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME):
113 tgz_path = os.path.join(download_export_folder, f)
114 if is_dirty(tgz_path):
115 output.warning("Removing %s, marked as dirty" % f)
116 os.remove(tgz_path)
117 clean_dirty(tgz_path)
118
119 export_folder = layout.export()
120 files, symlinked_folders = gather_files(export_folder)
121 files.update(symlinked_folders)
122 if CONANFILE not in files or CONAN_MANIFEST not in files:
123 raise ConanException("Cannot upload corrupted recipe '%s'" % str(ref))
124 export_src_folder = layout.export_sources()
125 src_files, src_symlinked_folders = gather_files(export_src_folder)
126 src_files.update(src_symlinked_folders)
127
128 # We do a copy of conanfile and conanmanifest to the download_export_folder
129 # so it is identical as when it is downloaded, and all files are from the same location
130 # to be uploaded
131 mkdir(download_export_folder)
132 shutil.copy2(os.path.join(export_folder, CONANFILE),
133 os.path.join(download_export_folder, CONANFILE))
134 shutil.copy2(os.path.join(export_folder, CONAN_MANIFEST),
135 os.path.join(download_export_folder, CONAN_MANIFEST))
136 result = {CONANFILE: os.path.join(download_export_folder, CONANFILE),
137 CONAN_MANIFEST: os.path.join(download_export_folder, CONAN_MANIFEST)}
138 # Files NOT included in the tgz
139 files.pop(CONANFILE)
140 files.pop(CONAN_MANIFEST)
141
142 def add_tgz(tgz_name, tgz_files):
143 tgz = os.path.join(download_export_folder, tgz_name)
144 if os.path.isfile(tgz):
145 result[tgz_name] = tgz
146 elif tgz_files:
147 compresslevel = self._global_conf.get("core.gzip:compresslevel", check_type=int)
148 tgz = compress_files(tgz_files, tgz_name, download_export_folder,
149 compresslevel=compresslevel, ref=ref)
150 result[tgz_name] = tgz
151
152 add_tgz(EXPORT_TGZ_NAME, files)
153 add_tgz(EXPORT_SOURCES_TGZ_NAME, src_files)
154 return result
155
156 def _prepare_package(self, pref, prev_bundle):
157 pkg_layout = self._app.cache.pkg_layout(pref)
158 if pkg_layout.package_is_dirty():
159 raise ConanException(f"Package {pref} is corrupted, aborting upload.\n"
160 f"Remove it with 'conan remove {pref}'")
161 cache_files = self._compress_package_files(pkg_layout, pref)
162 prev_bundle["files"] = cache_files
163
164 def _compress_package_files(self, layout, pref):
165 output = ConanOutput(scope=str(pref))
166 download_pkg_folder = layout.download_package()
167 package_tgz = os.path.join(download_pkg_folder, PACKAGE_TGZ_NAME)
168 if is_dirty(package_tgz):
169 output.warning("Removing %s, marked as dirty" % PACKAGE_TGZ_NAME)
170 os.remove(package_tgz)
171 clean_dirty(package_tgz)
172
173 # Get all the files in that directory
174 # existing package, will use short paths if defined
175 package_folder = layout.package()
176 files, symlinked_folders = gather_files(package_folder)
177 files.update(symlinked_folders)
178
179 if CONANINFO not in files or CONAN_MANIFEST not in files:
180 raise ConanException("Cannot upload corrupted package '%s'" % str(pref))
181
182 # Do a copy so the location of CONANINFO and MANIFEST is the "download" folder one
183 mkdir(download_pkg_folder)
184 shutil.copy2(os.path.join(package_folder, CONANINFO),
185 os.path.join(download_pkg_folder, CONANINFO))
186 shutil.copy2(os.path.join(package_folder, CONAN_MANIFEST),
187 os.path.join(download_pkg_folder, CONAN_MANIFEST))
188 # Files NOT included in the tgz
189 files.pop(CONANINFO)
190 files.pop(CONAN_MANIFEST)
191
192 if not os.path.isfile(package_tgz):
193 tgz_files = {f: path for f, path in files.items()}
194 compresslevel = self._global_conf.get("core.gzip:compresslevel", check_type=int)
195 tgz_path = compress_files(tgz_files, PACKAGE_TGZ_NAME, download_pkg_folder,
196 compresslevel=compresslevel, ref=pref)
197 assert tgz_path == package_tgz
198 assert os.path.exists(package_tgz)
199
200 return {PACKAGE_TGZ_NAME: package_tgz,
201 CONANINFO: os.path.join(download_pkg_folder, CONANINFO),
202 CONAN_MANIFEST: os.path.join(download_pkg_folder, CONAN_MANIFEST)}
203
204
205 class UploadExecutor:
206 """ does the actual file transfer to the remote. The files to be uploaded have already
207 been computed and are passed in the ``upload_data`` parameter, so this executor is also
208 agnostic about which files are transferred
209 """
210 def __init__(self, app: ConanApp):
211 self._app = app
212
213 def upload(self, upload_data, remote):
214 for ref, bundle in upload_data.refs().items():
215 if bundle.get("upload"):
216 self.upload_recipe(ref, bundle, remote)
217 for pref, prev_bundle in upload_data.prefs(ref, bundle).items():
218 if prev_bundle.get("upload"):
219 self.upload_package(pref, prev_bundle, remote)
220
221 def upload_recipe(self, ref, bundle, remote):
222 output = ConanOutput(scope=str(ref))
223 output.info(f"Uploading recipe '{ref.repr_notime()}'")
224 t1 = time.time()
225 cache_files = bundle["files"]
226
227 self._app.remote_manager.upload_recipe(ref, cache_files, remote)
228
229 duration = time.time() - t1
230 output.debug(f"Upload {ref} in {duration} time")
231 return ref
232
233 def upload_package(self, pref, prev_bundle, remote):
234 output = ConanOutput(scope=str(pref.ref))
235 output.info(f"Uploading package '{pref.repr_notime()}'")
236 cache_files = prev_bundle["files"]
237 assert (pref.revision is not None), "Cannot upload a package without PREV"
238 assert (pref.ref.revision is not None), "Cannot upload a package without RREV"
239
240 t1 = time.time()
241 self._app.remote_manager.upload_package(pref, cache_files, remote)
242 duration = time.time() - t1
243 output.debug(f"Upload {pref} in {duration} time")
244
245
246 def compress_files(files, name, dest_dir, compresslevel=None, ref=None):
247 t1 = time.time()
248 # FIXME, better write to disk sequentially and not keep tgz contents in memory
249 tgz_path = os.path.join(dest_dir, name)
250 ConanOutput(scope=str(ref)).info(f"Compressing {name}")
251 with set_dirty_context_manager(tgz_path), open(tgz_path, "wb") as tgz_handle:
252 tgz = gzopen_without_timestamps(name, mode="w", fileobj=tgz_handle,
253 compresslevel=compresslevel)
254 for filename, abs_path in sorted(files.items()):
255 # recursive is False in case it is a symlink to a folder
256 tgz.add(abs_path, filename, recursive=False)
257 tgz.close()
258
259 duration = time.time() - t1
260 ConanOutput().debug(f"{name} compressed in {duration} time")
261 return tgz_path
262
[end of conans/client/cmd/uploader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/cmd/uploader.py b/conans/client/cmd/uploader.py
--- a/conans/client/cmd/uploader.py
+++ b/conans/client/cmd/uploader.py
@@ -9,7 +9,8 @@
from conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,
EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)
from conans.util.files import (clean_dirty, is_dirty, gather_files,
- gzopen_without_timestamps, set_dirty_context_manager, mkdir)
+ gzopen_without_timestamps, set_dirty_context_manager, mkdir,
+ human_size)
UPLOAD_POLICY_FORCE = "force-upload"
UPLOAD_POLICY_SKIP = "skip-upload"
@@ -220,10 +221,11 @@
def upload_recipe(self, ref, bundle, remote):
output = ConanOutput(scope=str(ref))
- output.info(f"Uploading recipe '{ref.repr_notime()}'")
- t1 = time.time()
cache_files = bundle["files"]
+ output.info(f"Uploading recipe '{ref.repr_notime()}' ({_total_size(cache_files)})")
+
+ t1 = time.time()
self._app.remote_manager.upload_recipe(ref, cache_files, remote)
duration = time.time() - t1
@@ -232,11 +234,12 @@
def upload_package(self, pref, prev_bundle, remote):
output = ConanOutput(scope=str(pref.ref))
- output.info(f"Uploading package '{pref.repr_notime()}'")
cache_files = prev_bundle["files"]
assert (pref.revision is not None), "Cannot upload a package without PREV"
assert (pref.ref.revision is not None), "Cannot upload a package without RREV"
+ output.info(f"Uploading package '{pref.repr_notime()}' ({_total_size(cache_files)})")
+
t1 = time.time()
self._app.remote_manager.upload_package(pref, cache_files, remote)
duration = time.time() - t1
@@ -259,3 +262,11 @@
duration = time.time() - t1
ConanOutput().debug(f"{name} compressed in {duration} time")
return tgz_path
+
+
+def _total_size(cache_files):
+ total_size = 0
+ for file in cache_files.values():
+ stat = os.stat(file)
+ total_size += stat.st_size
+ return human_size(total_size)
| {"golden_diff": "diff --git a/conans/client/cmd/uploader.py b/conans/client/cmd/uploader.py\n--- a/conans/client/cmd/uploader.py\n+++ b/conans/client/cmd/uploader.py\n@@ -9,7 +9,8 @@\n from conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,\n EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)\n from conans.util.files import (clean_dirty, is_dirty, gather_files,\n- gzopen_without_timestamps, set_dirty_context_manager, mkdir)\n+ gzopen_without_timestamps, set_dirty_context_manager, mkdir,\n+ human_size)\n \n UPLOAD_POLICY_FORCE = \"force-upload\"\n UPLOAD_POLICY_SKIP = \"skip-upload\"\n@@ -220,10 +221,11 @@\n \n def upload_recipe(self, ref, bundle, remote):\n output = ConanOutput(scope=str(ref))\n- output.info(f\"Uploading recipe '{ref.repr_notime()}'\")\n- t1 = time.time()\n cache_files = bundle[\"files\"]\n \n+ output.info(f\"Uploading recipe '{ref.repr_notime()}' ({_total_size(cache_files)})\")\n+\n+ t1 = time.time()\n self._app.remote_manager.upload_recipe(ref, cache_files, remote)\n \n duration = time.time() - t1\n@@ -232,11 +234,12 @@\n \n def upload_package(self, pref, prev_bundle, remote):\n output = ConanOutput(scope=str(pref.ref))\n- output.info(f\"Uploading package '{pref.repr_notime()}'\")\n cache_files = prev_bundle[\"files\"]\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n \n+ output.info(f\"Uploading package '{pref.repr_notime()}' ({_total_size(cache_files)})\")\n+\n t1 = time.time()\n self._app.remote_manager.upload_package(pref, cache_files, remote)\n duration = time.time() - t1\n@@ -259,3 +262,11 @@\n duration = time.time() - t1\n ConanOutput().debug(f\"{name} compressed in {duration} time\")\n return tgz_path\n+\n+\n+def _total_size(cache_files):\n+ total_size = 0\n+ for file in cache_files.values():\n+ stat = os.stat(file)\n+ total_size += stat.st_size\n+ return human_size(total_size)\n", "issue": "[feature] show uploaded files size in `conan upload`\n<!-- What is your suggestion? Please be as specific as possible! -->\r\nIt would be very convenient to provide the size of files which will be upload to specific remote via `conan upload ...` in diagnostic purposes, e.g. sometimes artifacts could be very large and CI could refuse its uploading, see e.g. [this StackOverflow question](https://stackoverflow.com/questions/64329087/gitlab-self-hosted-error-uploading-artifacts-as-archive-to-coordinator-to) as example of related CI error. 
With this change CI administrators could adjust the file limit at CI without trial and error changing of CI configs.\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).\r\n\n", "before_files": [{"content": "import os\nimport shutil\nimport time\n\nfrom conan.internal.conan_app import ConanApp\nfrom conan.api.output import ConanOutput\nfrom conans.client.source import retrieve_exports_sources\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,\n EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)\nfrom conans.util.files import (clean_dirty, is_dirty, gather_files,\n gzopen_without_timestamps, set_dirty_context_manager, mkdir)\n\nUPLOAD_POLICY_FORCE = \"force-upload\"\nUPLOAD_POLICY_SKIP = \"skip-upload\"\n\n\nclass UploadUpstreamChecker:\n \"\"\" decides if something needs to be uploaded or force-uploaded checking if that exact\n revision already exists in the remote server, or if the --force parameter is forcing the upload\n This is completely irrespective of the actual package contents, it only uses the local\n computed revision and the remote one\n \"\"\"\n def __init__(self, app: ConanApp):\n self._app = app\n\n def check(self, upload_bundle, remote, force):\n for ref, recipe_bundle in upload_bundle.refs().items():\n self._check_upstream_recipe(ref, recipe_bundle, remote, force)\n for pref, prev_bundle in upload_bundle.prefs(ref, recipe_bundle).items():\n self._check_upstream_package(pref, prev_bundle, remote, force)\n\n def _check_upstream_recipe(self, ref, ref_bundle, remote, force):\n output = ConanOutput(scope=str(ref))\n output.info(\"Checking which revisions exist in the remote server\")\n try:\n assert ref.revision\n # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence\n server_ref = self._app.remote_manager.get_recipe_revision_reference(ref, remote)\n assert server_ref # If successful (not raising NotFoundException), this will exist\n except NotFoundException:\n ref_bundle[\"force_upload\"] = False\n ref_bundle[\"upload\"] = True\n else:\n if force:\n output.info(f\"Recipe '{ref.repr_notime()}' already in server, forcing upload\")\n ref_bundle[\"force_upload\"] = True\n ref_bundle[\"upload\"] = True\n else:\n output.info(f\"Recipe '{ref.repr_notime()}' already in server, skipping upload\")\n ref_bundle[\"upload\"] = False\n ref_bundle[\"force_upload\"] = False\n\n def _check_upstream_package(self, pref, prev_bundle, remote, force):\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n\n try:\n # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence\n server_revisions = self._app.remote_manager.get_package_revision_reference(pref, remote)\n assert server_revisions\n except NotFoundException:\n prev_bundle[\"force_upload\"] = False\n prev_bundle[\"upload\"] = True\n else:\n output = ConanOutput(scope=str(pref.ref))\n if force:\n output.info(f\"Package '{pref.repr_notime()}' already in server, forcing upload\")\n prev_bundle[\"force_upload\"] = True\n prev_bundle[\"upload\"] = True\n else:\n output.info(f\"Package '{pref.repr_notime()}' already in server, skipping upload\")\n prev_bundle[\"force_upload\"] = False\n prev_bundle[\"upload\"] = False\n\n\nclass PackagePreparator:\n def __init__(self, app: ConanApp, global_conf):\n self._app = app\n self._global_conf = 
global_conf\n\n def prepare(self, upload_bundle, enabled_remotes):\n for ref, bundle in upload_bundle.refs().items():\n layout = self._app.cache.recipe_layout(ref)\n conanfile_path = layout.conanfile()\n conanfile = self._app.loader.load_basic(conanfile_path)\n\n if bundle.get(\"upload\"):\n self._prepare_recipe(ref, bundle, conanfile, enabled_remotes)\n for pref, prev_bundle in upload_bundle.prefs(ref, bundle).items():\n if prev_bundle.get(\"upload\"):\n self._prepare_package(pref, prev_bundle)\n\n def _prepare_recipe(self, ref, ref_bundle, conanfile, remotes):\n \"\"\" do a bunch of things that are necessary before actually executing the upload:\n - retrieve exports_sources to complete the recipe if necessary\n - compress the artifacts in conan_export.tgz and conan_export_sources.tgz\n \"\"\"\n try:\n recipe_layout = self._app.cache.recipe_layout(ref)\n retrieve_exports_sources(self._app.remote_manager, recipe_layout, conanfile, ref,\n remotes)\n cache_files = self._compress_recipe_files(recipe_layout, ref)\n ref_bundle[\"files\"] = cache_files\n except Exception as e:\n raise ConanException(f\"{ref} Error while compressing: {e}\")\n\n def _compress_recipe_files(self, layout, ref):\n download_export_folder = layout.download_export()\n\n output = ConanOutput(scope=str(ref))\n for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME):\n tgz_path = os.path.join(download_export_folder, f)\n if is_dirty(tgz_path):\n output.warning(\"Removing %s, marked as dirty\" % f)\n os.remove(tgz_path)\n clean_dirty(tgz_path)\n\n export_folder = layout.export()\n files, symlinked_folders = gather_files(export_folder)\n files.update(symlinked_folders)\n if CONANFILE not in files or CONAN_MANIFEST not in files:\n raise ConanException(\"Cannot upload corrupted recipe '%s'\" % str(ref))\n export_src_folder = layout.export_sources()\n src_files, src_symlinked_folders = gather_files(export_src_folder)\n src_files.update(src_symlinked_folders)\n\n # We do a copy of conanfile and conanmanifest to the download_export_folder\n # so it is identical as when it is downloaded, and all files are from the same location\n # to be uploaded\n mkdir(download_export_folder)\n shutil.copy2(os.path.join(export_folder, CONANFILE),\n os.path.join(download_export_folder, CONANFILE))\n shutil.copy2(os.path.join(export_folder, CONAN_MANIFEST),\n os.path.join(download_export_folder, CONAN_MANIFEST))\n result = {CONANFILE: os.path.join(download_export_folder, CONANFILE),\n CONAN_MANIFEST: os.path.join(download_export_folder, CONAN_MANIFEST)}\n # Files NOT included in the tgz\n files.pop(CONANFILE)\n files.pop(CONAN_MANIFEST)\n\n def add_tgz(tgz_name, tgz_files):\n tgz = os.path.join(download_export_folder, tgz_name)\n if os.path.isfile(tgz):\n result[tgz_name] = tgz\n elif tgz_files:\n compresslevel = self._global_conf.get(\"core.gzip:compresslevel\", check_type=int)\n tgz = compress_files(tgz_files, tgz_name, download_export_folder,\n compresslevel=compresslevel, ref=ref)\n result[tgz_name] = tgz\n\n add_tgz(EXPORT_TGZ_NAME, files)\n add_tgz(EXPORT_SOURCES_TGZ_NAME, src_files)\n return result\n\n def _prepare_package(self, pref, prev_bundle):\n pkg_layout = self._app.cache.pkg_layout(pref)\n if pkg_layout.package_is_dirty():\n raise ConanException(f\"Package {pref} is corrupted, aborting upload.\\n\"\n f\"Remove it with 'conan remove {pref}'\")\n cache_files = self._compress_package_files(pkg_layout, pref)\n prev_bundle[\"files\"] = cache_files\n\n def _compress_package_files(self, layout, pref):\n output = ConanOutput(scope=str(pref))\n 
download_pkg_folder = layout.download_package()\n package_tgz = os.path.join(download_pkg_folder, PACKAGE_TGZ_NAME)\n if is_dirty(package_tgz):\n output.warning(\"Removing %s, marked as dirty\" % PACKAGE_TGZ_NAME)\n os.remove(package_tgz)\n clean_dirty(package_tgz)\n\n # Get all the files in that directory\n # existing package, will use short paths if defined\n package_folder = layout.package()\n files, symlinked_folders = gather_files(package_folder)\n files.update(symlinked_folders)\n\n if CONANINFO not in files or CONAN_MANIFEST not in files:\n raise ConanException(\"Cannot upload corrupted package '%s'\" % str(pref))\n\n # Do a copy so the location of CONANINFO and MANIFEST is the \"download\" folder one\n mkdir(download_pkg_folder)\n shutil.copy2(os.path.join(package_folder, CONANINFO),\n os.path.join(download_pkg_folder, CONANINFO))\n shutil.copy2(os.path.join(package_folder, CONAN_MANIFEST),\n os.path.join(download_pkg_folder, CONAN_MANIFEST))\n # Files NOT included in the tgz\n files.pop(CONANINFO)\n files.pop(CONAN_MANIFEST)\n\n if not os.path.isfile(package_tgz):\n tgz_files = {f: path for f, path in files.items()}\n compresslevel = self._global_conf.get(\"core.gzip:compresslevel\", check_type=int)\n tgz_path = compress_files(tgz_files, PACKAGE_TGZ_NAME, download_pkg_folder,\n compresslevel=compresslevel, ref=pref)\n assert tgz_path == package_tgz\n assert os.path.exists(package_tgz)\n\n return {PACKAGE_TGZ_NAME: package_tgz,\n CONANINFO: os.path.join(download_pkg_folder, CONANINFO),\n CONAN_MANIFEST: os.path.join(download_pkg_folder, CONAN_MANIFEST)}\n\n\nclass UploadExecutor:\n \"\"\" does the actual file transfer to the remote. The files to be uploaded have already\n been computed and are passed in the ``upload_data`` parameter, so this executor is also\n agnostic about which files are transferred\n \"\"\"\n def __init__(self, app: ConanApp):\n self._app = app\n\n def upload(self, upload_data, remote):\n for ref, bundle in upload_data.refs().items():\n if bundle.get(\"upload\"):\n self.upload_recipe(ref, bundle, remote)\n for pref, prev_bundle in upload_data.prefs(ref, bundle).items():\n if prev_bundle.get(\"upload\"):\n self.upload_package(pref, prev_bundle, remote)\n\n def upload_recipe(self, ref, bundle, remote):\n output = ConanOutput(scope=str(ref))\n output.info(f\"Uploading recipe '{ref.repr_notime()}'\")\n t1 = time.time()\n cache_files = bundle[\"files\"]\n\n self._app.remote_manager.upload_recipe(ref, cache_files, remote)\n\n duration = time.time() - t1\n output.debug(f\"Upload {ref} in {duration} time\")\n return ref\n\n def upload_package(self, pref, prev_bundle, remote):\n output = ConanOutput(scope=str(pref.ref))\n output.info(f\"Uploading package '{pref.repr_notime()}'\")\n cache_files = prev_bundle[\"files\"]\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n\n t1 = time.time()\n self._app.remote_manager.upload_package(pref, cache_files, remote)\n duration = time.time() - t1\n output.debug(f\"Upload {pref} in {duration} time\")\n\n\ndef compress_files(files, name, dest_dir, compresslevel=None, ref=None):\n t1 = time.time()\n # FIXME, better write to disk sequentially and not keep tgz contents in memory\n tgz_path = os.path.join(dest_dir, name)\n ConanOutput(scope=str(ref)).info(f\"Compressing {name}\")\n with set_dirty_context_manager(tgz_path), open(tgz_path, \"wb\") as tgz_handle:\n tgz = gzopen_without_timestamps(name, mode=\"w\", 
fileobj=tgz_handle,\n compresslevel=compresslevel)\n for filename, abs_path in sorted(files.items()):\n # recursive is False in case it is a symlink to a folder\n tgz.add(abs_path, filename, recursive=False)\n tgz.close()\n\n duration = time.time() - t1\n ConanOutput().debug(f\"{name} compressed in {duration} time\")\n return tgz_path\n", "path": "conans/client/cmd/uploader.py"}]} | 4,091 | 555 |
gh_patches_debug_14944 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1788 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move getting started guide to opentelemetry.io
For GA there is an ask to add at least all getting-started documentation to the opentelemetry.io website -- for Python this is tracked in: https://github.com/open-telemetry/opentelemetry.io/issues/285. Maintaining the documentation in two places is not ideal, so the proposal is to move the getting-started documentation there. A similar move has already happened for Java.
</issue>
<code>
[start of docs/getting_started/otlpcollector_example.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # otcollector.py
16 import time
17
18 from opentelemetry import trace
19 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
20 OTLPSpanExporter,
21 )
22 from opentelemetry.sdk.trace import TracerProvider
23 from opentelemetry.sdk.trace.export import BatchSpanProcessor
24
25 span_exporter = OTLPSpanExporter(
26 # optional
27 # endpoint:="myCollectorURL:55678",
28 # credentials=ChannelCredentials(credentials),
29 # headers=(("metadata", "metadata")),
30 )
31 tracer_provider = TracerProvider()
32 trace.set_tracer_provider(tracer_provider)
33 span_processor = BatchSpanProcessor(span_exporter)
34 tracer_provider.add_span_processor(span_processor)
35
36 # Configure the tracer to use the collector exporter
37 tracer = trace.get_tracer_provider().get_tracer(__name__)
38
39 with tracer.start_as_current_span("foo"):
40 print("Hello world!")
41
[end of docs/getting_started/otlpcollector_example.py]
[start of docs/getting_started/tracing_example.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # tracing.py
16 from opentelemetry import trace
17 from opentelemetry.sdk.trace import TracerProvider
18 from opentelemetry.sdk.trace.export import (
19 ConsoleSpanExporter,
20 SimpleSpanProcessor,
21 )
22
23 trace.set_tracer_provider(TracerProvider())
24 trace.get_tracer_provider().add_span_processor(
25 SimpleSpanProcessor(ConsoleSpanExporter())
26 )
27
28 tracer = trace.get_tracer(__name__)
29
30 with tracer.start_as_current_span("foo"):
31 with tracer.start_as_current_span("bar"):
32 with tracer.start_as_current_span("baz"):
33 print("Hello world from OpenTelemetry Python!")
34
[end of docs/getting_started/tracing_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py
--- a/docs/getting_started/otlpcollector_example.py
+++ b/docs/getting_started/otlpcollector_example.py
@@ -24,7 +24,7 @@
span_exporter = OTLPSpanExporter(
# optional
- # endpoint:="myCollectorURL:55678",
+ # endpoint:="myCollectorURL:4317",
# credentials=ChannelCredentials(credentials),
# headers=(("metadata", "metadata")),
)
diff --git a/docs/getting_started/tracing_example.py b/docs/getting_started/tracing_example.py
--- a/docs/getting_started/tracing_example.py
+++ b/docs/getting_started/tracing_example.py
@@ -20,10 +20,11 @@
SimpleSpanProcessor,
)
-trace.set_tracer_provider(TracerProvider())
-trace.get_tracer_provider().add_span_processor(
- SimpleSpanProcessor(ConsoleSpanExporter())
-)
+provider = TracerProvider()
+processor = SimpleSpanProcessor(ConsoleSpanExporter())
+provider.add_span_processor(processor)
+trace.set_tracer_provider(provider)
+
tracer = trace.get_tracer(__name__)
| {"golden_diff": "diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py\n--- a/docs/getting_started/otlpcollector_example.py\n+++ b/docs/getting_started/otlpcollector_example.py\n@@ -24,7 +24,7 @@\n \n span_exporter = OTLPSpanExporter(\n # optional\n- # endpoint:=\"myCollectorURL:55678\",\n+ # endpoint:=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n )\ndiff --git a/docs/getting_started/tracing_example.py b/docs/getting_started/tracing_example.py\n--- a/docs/getting_started/tracing_example.py\n+++ b/docs/getting_started/tracing_example.py\n@@ -20,10 +20,11 @@\n SimpleSpanProcessor,\n )\n \n-trace.set_tracer_provider(TracerProvider())\n-trace.get_tracer_provider().add_span_processor(\n- SimpleSpanProcessor(ConsoleSpanExporter())\n-)\n+provider = TracerProvider()\n+processor = SimpleSpanProcessor(ConsoleSpanExporter())\n+provider.add_span_processor(processor)\n+trace.set_tracer_provider(provider)\n+\n \n tracer = trace.get_tracer(__name__)\n", "issue": "Move getting started guide to opentelemetry.io\nFor GA there is an ask to add at least all getting started documentation to the opentelemetry.io website -- for Python this is tracked in: https://github.com/open-telemetry/opentelemetry.io/issues/285. Maintaining the documentation in two places is not ideal so proposal is to move getting started documentation. A similar activity already happened for Java.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint:=\"myCollectorURL:55678\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the License.\n\n# tracing.py\nfrom opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntrace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n)\n\ntracer = trace.get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n", "path": "docs/getting_started/tracing_example.py"}]} | 1,350 | 271 |
gh_patches_debug_42548 | rasdani/github-patches | git_diff | deepset-ai__haystack-7841 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MAP and MRR wrong for multiple gold documents
**Describe the bug**
Both the MAP and the MRR show wrong values. It seems we calculate the score for each single gold document and then overwrite it with the score of the next one, instead of calculating it over the whole set of gold documents.
**Expected behavior**
correct values
**To Reproduce**
```
retrieved_docs = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"]
gold_docs = ["one", "two", "three","four","seven"]
from haystack.components.evaluators import DocumentMAPEvaluator, DocumentMRREvaluator
from haystack import Document
mapevaluator = DocumentMAPEvaluator()
mrrevaluator = DocumentMRREvaluator()
mapresult = mapevaluator.run(
ground_truth_documents=[[Document(content=content) for content in gold_docs]],
retrieved_documents=[[Document(content=content) for content in retrieved_docs]])
mrrresult = mrrevaluator.run(
ground_truth_documents=[[Document(content=content) for content in gold_docs]],
retrieved_documents=[[Document(content=content) for content in retrieved_docs]])
print(mapresult["individual_scores"])
print(mrrresult["individual_scores"])
print(mapresult["score"])
print(mrrresult["score"])
```
</issue>
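
For reference, the expected values for this reproduction can be worked out by hand without Haystack, assuming exact matching between retrieved and gold contents (which is what the example data implies):

```python
retrieved = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"]
gold = {"one", "two", "three", "four", "seven"}

# Reciprocal rank of the first relevant document.
mrr = next(1 / rank for rank, doc in enumerate(retrieved, start=1) if doc in gold)

# Average precision: mean of precision@k over the ranks k where a relevant doc appears.
hits, precisions = 0, []
for rank, doc in enumerate(retrieved, start=1):
    if doc in gold:
        hits += 1
        precisions.append(hits / rank)
average_precision = sum(precisions) / len(precisions)

print(mrr)                # 1.0, because the first retrieved document ("one") is relevant
print(average_precision)  # 0.9428..., i.e. (1/1 + 2/2 + 3/3 + 4/4 + 5/7) / 5
```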
<code>
[start of haystack/components/evaluators/document_mrr.py]
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 from typing import Any, Dict, List
6
7 from haystack import Document, component
8
9
10 @component
11 class DocumentMRREvaluator:
12 """
13 Evaluator that calculates the mean reciprocal rank of the retrieved documents.
14
15 MRR measures how high the first retrieved document is ranked.
16 Each question can have multiple ground truth documents and multiple retrieved documents.
17
18 `DocumentMRREvaluator` doesn't normalize its inputs, the `DocumentCleaner` component
19 should be used to clean and normalize the documents before passing them to this evaluator.
20
21 Usage example:
22 ```python
23 from haystack import Document
24 from haystack.components.evaluators import DocumentMRREvaluator
25
26 evaluator = DocumentMRREvaluator()
27 result = evaluator.run(
28 ground_truth_documents=[
29 [Document(content="France")],
30 [Document(content="9th century"), Document(content="9th")],
31 ],
32 retrieved_documents=[
33 [Document(content="France")],
34 [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
35 ],
36 )
37 print(result["individual_scores"])
38 # [1.0, 1.0]
39 print(result["score"])
40 # 1.0
41 ```
42 """
43
44 @component.output_types(score=float, individual_scores=List[float])
45 def run(
46 self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
47 ) -> Dict[str, Any]:
48 """
49 Run the DocumentMRREvaluator on the given inputs.
50
51 `ground_truth_documents` and `retrieved_documents` must have the same length.
52
53 :param ground_truth_documents:
54 A list of expected documents for each question.
55 :param retrieved_documents:
56 A list of retrieved documents for each question.
57 :returns:
58 A dictionary with the following outputs:
59 - `score` - The average of calculated scores.
60 - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high the first retrieved document is ranked.
61 """
62 if len(ground_truth_documents) != len(retrieved_documents):
63 msg = "The length of ground_truth_documents and retrieved_documents must be the same."
64 raise ValueError(msg)
65
66 individual_scores = []
67
68 for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
69 score = 0.0
70 for ground_document in ground_truth:
71 if ground_document.content is None:
72 continue
73
74 for rank, retrieved_document in enumerate(retrieved):
75 if retrieved_document.content is None:
76 continue
77
78 if ground_document.content in retrieved_document.content:
79 score = 1 / (rank + 1)
80 break
81 individual_scores.append(score)
82
83 score = sum(individual_scores) / len(retrieved_documents)
84
85 return {"score": score, "individual_scores": individual_scores}
86
[end of haystack/components/evaluators/document_mrr.py]
[start of haystack/components/evaluators/document_map.py]
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 from typing import Any, Dict, List
6
7 from haystack import Document, component
8
9
10 @component
11 class DocumentMAPEvaluator:
12 """
13 A Mean Average Precision (MAP) evaluator for documents.
14
15 Evaluator that calculates the mean average precision of the retrieved documents, a metric
16 that measures how high retrieved documents are ranked.
17 Each question can have multiple ground truth documents and multiple retrieved documents.
18
19 `DocumentMAPEvaluator` doesn't normalize its inputs, the `DocumentCleaner` component
20 should be used to clean and normalize the documents before passing them to this evaluator.
21
22 Usage example:
23 ```python
24 from haystack import Document
25 from haystack.components.evaluators import DocumentMAPEvaluator
26
27 evaluator = DocumentMAPEvaluator()
28 result = evaluator.run(
29 ground_truth_documents=[
30 [Document(content="France")],
31 [Document(content="9th century"), Document(content="9th")],
32 ],
33 retrieved_documents=[
34 [Document(content="France")],
35 [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
36 ],
37 )
38
39 print(result["individual_scores"])
40 # [1.0, 0.8333333333333333]
41 print(result["score"])
42 # 0.9166666666666666
43 ```
44 """
45
46 @component.output_types(score=float, individual_scores=List[float])
47 def run(
48 self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
49 ) -> Dict[str, Any]:
50 """
51 Run the DocumentMAPEvaluator on the given inputs.
52
53 All lists must have the same length.
54
55 :param ground_truth_documents:
56 A list of expected documents for each question.
57 :param retrieved_documents:
58 A list of retrieved documents for each question.
59 :returns:
60 A dictionary with the following outputs:
61 - `score` - The average of calculated scores.
62 - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high retrieved documents are ranked.
63 """
64 if len(ground_truth_documents) != len(retrieved_documents):
65 msg = "The length of ground_truth_documents and retrieved_documents must be the same."
66 raise ValueError(msg)
67
68 individual_scores = []
69
70 for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
71 score = 0.0
72 for ground_document in ground_truth:
73 if ground_document.content is None:
74 continue
75
76 average_precision = 0.0
77 relevant_documents = 0
78
79 for rank, retrieved_document in enumerate(retrieved):
80 if retrieved_document.content is None:
81 continue
82
83 if ground_document.content in retrieved_document.content:
84 relevant_documents += 1
85 average_precision += relevant_documents / (rank + 1)
86 if relevant_documents > 0:
87 score = average_precision / relevant_documents
88 individual_scores.append(score)
89
90 score = sum(individual_scores) / len(retrieved_documents)
91
92 return {"score": score, "individual_scores": individual_scores}
93
[end of haystack/components/evaluators/document_map.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/components/evaluators/document_map.py b/haystack/components/evaluators/document_map.py
--- a/haystack/components/evaluators/document_map.py
+++ b/haystack/components/evaluators/document_map.py
@@ -43,6 +43,7 @@
```
"""
+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.
@component.output_types(score=float, individual_scores=List[float])
def run(
self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
@@ -68,25 +69,21 @@
individual_scores = []
for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
- score = 0.0
- for ground_document in ground_truth:
- if ground_document.content is None:
- continue
-
- average_precision = 0.0
- relevant_documents = 0
+ average_precision = 0.0
+ average_precision_numerator = 0.0
+ relevant_documents = 0
- for rank, retrieved_document in enumerate(retrieved):
- if retrieved_document.content is None:
- continue
-
- if ground_document.content in retrieved_document.content:
- relevant_documents += 1
- average_precision += relevant_documents / (rank + 1)
- if relevant_documents > 0:
- score = average_precision / relevant_documents
- individual_scores.append(score)
+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]
+ for rank, retrieved_document in enumerate(retrieved):
+ if retrieved_document.content is None:
+ continue
- score = sum(individual_scores) / len(retrieved_documents)
+ if retrieved_document.content in ground_truth_contents:
+ relevant_documents += 1
+ average_precision_numerator += relevant_documents / (rank + 1)
+ if relevant_documents > 0:
+ average_precision = average_precision_numerator / relevant_documents
+ individual_scores.append(average_precision)
+ score = sum(individual_scores) / len(ground_truth_documents)
return {"score": score, "individual_scores": individual_scores}
diff --git a/haystack/components/evaluators/document_mrr.py b/haystack/components/evaluators/document_mrr.py
--- a/haystack/components/evaluators/document_mrr.py
+++ b/haystack/components/evaluators/document_mrr.py
@@ -41,6 +41,7 @@
```
"""
+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.
@component.output_types(score=float, individual_scores=List[float])
def run(
self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
@@ -66,20 +67,17 @@
individual_scores = []
for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
- score = 0.0
- for ground_document in ground_truth:
- if ground_document.content is None:
- continue
-
- for rank, retrieved_document in enumerate(retrieved):
- if retrieved_document.content is None:
- continue
+ reciprocal_rank = 0.0
- if ground_document.content in retrieved_document.content:
- score = 1 / (rank + 1)
- break
- individual_scores.append(score)
+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]
+ for rank, retrieved_document in enumerate(retrieved):
+ if retrieved_document.content is None:
+ continue
+ if retrieved_document.content in ground_truth_contents:
+ reciprocal_rank = 1 / (rank + 1)
+ break
+ individual_scores.append(reciprocal_rank)
- score = sum(individual_scores) / len(retrieved_documents)
+ score = sum(individual_scores) / len(ground_truth_documents)
return {"score": score, "individual_scores": individual_scores}
| {"golden_diff": "diff --git a/haystack/components/evaluators/document_map.py b/haystack/components/evaluators/document_map.py\n--- a/haystack/components/evaluators/document_map.py\n+++ b/haystack/components/evaluators/document_map.py\n@@ -43,6 +43,7 @@\n ```\n \"\"\"\n \n+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n@@ -68,25 +69,21 @@\n individual_scores = []\n \n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n- score = 0.0\n- for ground_document in ground_truth:\n- if ground_document.content is None:\n- continue\n-\n- average_precision = 0.0\n- relevant_documents = 0\n+ average_precision = 0.0\n+ average_precision_numerator = 0.0\n+ relevant_documents = 0\n \n- for rank, retrieved_document in enumerate(retrieved):\n- if retrieved_document.content is None:\n- continue\n-\n- if ground_document.content in retrieved_document.content:\n- relevant_documents += 1\n- average_precision += relevant_documents / (rank + 1)\n- if relevant_documents > 0:\n- score = average_precision / relevant_documents\n- individual_scores.append(score)\n+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]\n+ for rank, retrieved_document in enumerate(retrieved):\n+ if retrieved_document.content is None:\n+ continue\n \n- score = sum(individual_scores) / len(retrieved_documents)\n+ if retrieved_document.content in ground_truth_contents:\n+ relevant_documents += 1\n+ average_precision_numerator += relevant_documents / (rank + 1)\n+ if relevant_documents > 0:\n+ average_precision = average_precision_numerator / relevant_documents\n+ individual_scores.append(average_precision)\n \n+ score = sum(individual_scores) / len(ground_truth_documents)\n return {\"score\": score, \"individual_scores\": individual_scores}\ndiff --git a/haystack/components/evaluators/document_mrr.py b/haystack/components/evaluators/document_mrr.py\n--- a/haystack/components/evaluators/document_mrr.py\n+++ b/haystack/components/evaluators/document_mrr.py\n@@ -41,6 +41,7 @@\n ```\n \"\"\"\n \n+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n@@ -66,20 +67,17 @@\n individual_scores = []\n \n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n- score = 0.0\n- for ground_document in ground_truth:\n- if ground_document.content is None:\n- continue\n-\n- for rank, retrieved_document in enumerate(retrieved):\n- if retrieved_document.content is None:\n- continue\n+ reciprocal_rank = 0.0\n \n- if ground_document.content in retrieved_document.content:\n- score = 1 / (rank + 1)\n- break\n- individual_scores.append(score)\n+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]\n+ for rank, retrieved_document in enumerate(retrieved):\n+ if retrieved_document.content is None:\n+ continue\n+ if retrieved_document.content in ground_truth_contents:\n+ reciprocal_rank = 1 / (rank + 1)\n+ break\n+ individual_scores.append(reciprocal_rank)\n \n- score = sum(individual_scores) / len(retrieved_documents)\n+ score = sum(individual_scores) / len(ground_truth_documents)\n \n return {\"score\": score, 
\"individual_scores\": individual_scores}\n", "issue": "MAP and MRR wrong for multiple gold documents\n**Describe the bug**\r\nBoth the MAP and the MRR show wrong values. It seems we calculate the score for single gold documents and then override it after each instead of calculating it for the whole batch of gold documents. \r\n\r\n**Expected behavior**\r\ncorrect values\r\n\r\n**To Reproduce**\r\n```\r\nretrieved_docs = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\"]\r\ngold_docs = [\"one\", \"two\", \"three\",\"four\",\"seven\"]\r\n\r\nfrom haystack.components.evaluators import DocumentMAPEvaluator, DocumentMRREvaluator\r\nfrom haystack import Document\r\n\r\nmapevaluator = DocumentMAPEvaluator()\r\nmrrevaluator = DocumentMRREvaluator()\r\nmapresult = mapevaluator.run(\r\n ground_truth_documents=[[Document(content=content) for content in gold_docs]],\r\n retrieved_documents=[[Document(content=content) for content in retrieved_docs]])\r\n\r\nmrrresult = mrrevaluator.run(\r\n ground_truth_documents=[[Document(content=content) for content in gold_docs]],\r\n retrieved_documents=[[Document(content=content) for content in retrieved_docs]])\r\n\r\nprint(mapresult[\"individual_scores\"])\r\nprint(mrrresult[\"individual_scores\"])\r\nprint(mapresult[\"score\"])\r\nprint(mrrresult[\"score\"])\r\n```\r\n\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Any, Dict, List\n\nfrom haystack import Document, component\n\n\n@component\nclass DocumentMRREvaluator:\n \"\"\"\n Evaluator that calculates the mean reciprocal rank of the retrieved documents.\n\n MRR measures how high the first retrieved document is ranked.\n Each question can have multiple ground truth documents and multiple retrieved documents.\n\n `DocumentMRREvaluator` doesn't normalize its inputs, the `DocumentCleaner` component\n should be used to clean and normalize the documents before passing them to this evaluator.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.evaluators import DocumentMRREvaluator\n\n evaluator = DocumentMRREvaluator()\n result = evaluator.run(\n ground_truth_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"9th\")],\n ],\n retrieved_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"10th century\"), Document(content=\"9th\")],\n ],\n )\n print(result[\"individual_scores\"])\n # [1.0, 1.0]\n print(result[\"score\"])\n # 1.0\n ```\n \"\"\"\n\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n ) -> Dict[str, Any]:\n \"\"\"\n Run the DocumentMRREvaluator on the given inputs.\n\n `ground_truth_documents` and `retrieved_documents` must have the same length.\n\n :param ground_truth_documents:\n A list of expected documents for each question.\n :param retrieved_documents:\n A list of retrieved documents for each question.\n :returns:\n A dictionary with the following outputs:\n - `score` - The average of calculated scores.\n - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high the first retrieved document is ranked.\n \"\"\"\n if len(ground_truth_documents) != len(retrieved_documents):\n msg = \"The length of ground_truth_documents and retrieved_documents 
must be the same.\"\n raise ValueError(msg)\n\n individual_scores = []\n\n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n score = 0.0\n for ground_document in ground_truth:\n if ground_document.content is None:\n continue\n\n for rank, retrieved_document in enumerate(retrieved):\n if retrieved_document.content is None:\n continue\n\n if ground_document.content in retrieved_document.content:\n score = 1 / (rank + 1)\n break\n individual_scores.append(score)\n\n score = sum(individual_scores) / len(retrieved_documents)\n\n return {\"score\": score, \"individual_scores\": individual_scores}\n", "path": "haystack/components/evaluators/document_mrr.py"}, {"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Any, Dict, List\n\nfrom haystack import Document, component\n\n\n@component\nclass DocumentMAPEvaluator:\n \"\"\"\n A Mean Average Precision (MAP) evaluator for documents.\n\n Evaluator that calculates the mean average precision of the retrieved documents, a metric\n that measures how high retrieved documents are ranked.\n Each question can have multiple ground truth documents and multiple retrieved documents.\n\n `DocumentMAPEvaluator` doesn't normalize its inputs, the `DocumentCleaner` component\n should be used to clean and normalize the documents before passing them to this evaluator.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.evaluators import DocumentMAPEvaluator\n\n evaluator = DocumentMAPEvaluator()\n result = evaluator.run(\n ground_truth_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"9th\")],\n ],\n retrieved_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"10th century\"), Document(content=\"9th\")],\n ],\n )\n\n print(result[\"individual_scores\"])\n # [1.0, 0.8333333333333333]\n print(result[\"score\"])\n # 0.9166666666666666\n ```\n \"\"\"\n\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n ) -> Dict[str, Any]:\n \"\"\"\n Run the DocumentMAPEvaluator on the given inputs.\n\n All lists must have the same length.\n\n :param ground_truth_documents:\n A list of expected documents for each question.\n :param retrieved_documents:\n A list of retrieved documents for each question.\n :returns:\n A dictionary with the following outputs:\n - `score` - The average of calculated scores.\n - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high retrieved documents are ranked.\n \"\"\"\n if len(ground_truth_documents) != len(retrieved_documents):\n msg = \"The length of ground_truth_documents and retrieved_documents must be the same.\"\n raise ValueError(msg)\n\n individual_scores = []\n\n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n score = 0.0\n for ground_document in ground_truth:\n if ground_document.content is None:\n continue\n\n average_precision = 0.0\n relevant_documents = 0\n\n for rank, retrieved_document in enumerate(retrieved):\n if retrieved_document.content is None:\n continue\n\n if ground_document.content in retrieved_document.content:\n relevant_documents += 1\n average_precision += relevant_documents / (rank + 1)\n if relevant_documents > 0:\n score = average_precision / relevant_documents\n 
individual_scores.append(score)\n\n score = sum(individual_scores) / len(retrieved_documents)\n\n return {\"score\": score, \"individual_scores\": individual_scores}\n", "path": "haystack/components/evaluators/document_map.py"}]} | 2,545 | 907 |
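As an aside on the evaluator bug above: the reciprocal rank should come from the best-ranked hit across *all* gold documents, and average precision should be accumulated once over the whole gold set rather than reset per gold document. The following is a minimal standalone sketch of that aggregation in plain Python — not Haystack's actual fix; it uses bare strings with substring matching instead of `Document` objects, and the function names are made up for illustration:

```python
from typing import List


def reciprocal_rank(gold: List[str], retrieved: List[str]) -> float:
    # Rank of the first retrieved document that matches ANY gold document.
    for rank, doc in enumerate(retrieved, start=1):
        if any(g in doc for g in gold):
            return 1.0 / rank
    return 0.0


def average_precision(gold: List[str], retrieved: List[str]) -> float:
    # Accumulate precision at every rank holding a relevant document,
    # then average over the number of relevant documents actually found.
    relevant, precision_sum = 0, 0.0
    for rank, doc in enumerate(retrieved, start=1):
        if any(g in doc for g in gold):
            relevant += 1
            precision_sum += relevant / rank
    return precision_sum / relevant if relevant else 0.0


retrieved = ["one", "two", "three", "four", "five",
             "six", "seven", "eight", "nine", "ten"]
gold = ["one", "two", "three", "four", "seven"]

print(reciprocal_rank(gold, retrieved))    # 1.0 -- first match is at rank 1
print(average_precision(gold, retrieved))  # ~0.943 -- (1 + 1 + 1 + 1 + 5/7) / 5
```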
gh_patches_debug_2272 | rasdani/github-patches | git_diff | python-pillow__Pillow-6481 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PSD incorrectly loaded
### What did you do?
I opened the TIFF in Pillow and converted it to JPG.
### What did you expect to happen?
The JPG image to look the same as the original TIFF.
### What actually happened?
The converted JPG looks malformed and has messed up colors.
### What are your OS, Python and Pillow versions?
* OS: Linux
* Python: 3.10.5
* Pillow: 9.1.1 (also tested -git)
```python
>>> img = Image.open("3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif")
>>> out_img = img.convert("RGB")
>>> out_img.save("converted.jpg", quality=95)
```
[original image](https://api.collectie.gent/storage/v1/download/3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif) (beware, 274MB)
[converted image](https://api.collectie.gent/storage/v1/download/3a029a4f48b480211286486a6a1f0f0b-transcode-OA_535_161_17_F_TE.jpg)
Is it okay to report this here, or should I report it to the appropriate library (libtiff, libjpeg-turbo, ...)?
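One rough way to narrow a report like this down (a diagnostic sketch, not part of Pillow itself; the file name is taken from the report and assumed to be available locally) is to dump the mode and band information Pillow detected before converting, since a CMYK or extra-channel image that is being misread usually shows up there:

```python
from PIL import Image

# Hypothetical local copy of the image referenced in the report.
path = "OA_535_161_17_F_TE.tif"

with Image.open(path) as img:
    print(img.format)      # container format Pillow detected
    print(img.mode)        # e.g. "RGB", "RGBA", "CMYK"
    print(img.getbands())  # per-channel names, e.g. ("R", "G", "B", "A")
    print(img.size)

    # Saving an explicit RGB conversion makes it easy to diff against the bad output.
    img.convert("RGB").save("converted-check.jpg", quality=95)
```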
</issue>
<code>
[start of src/PIL/PsdImagePlugin.py]
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # Adobe PSD 2.5/3.0 file handling
6 #
7 # History:
8 # 1995-09-01 fl Created
9 # 1997-01-03 fl Read most PSD images
10 # 1997-01-18 fl Fixed P and CMYK support
11 # 2001-10-21 fl Added seek/tell support (for layers)
12 #
13 # Copyright (c) 1997-2001 by Secret Labs AB.
14 # Copyright (c) 1995-2001 by Fredrik Lundh
15 #
16 # See the README file for information on usage and redistribution.
17 #
18
19 import io
20
21 from . import Image, ImageFile, ImagePalette
22 from ._binary import i8
23 from ._binary import i16be as i16
24 from ._binary import i32be as i32
25 from ._binary import si16be as si16
26
27 MODES = {
28 # (photoshop mode, bits) -> (pil mode, required channels)
29 (0, 1): ("1", 1),
30 (0, 8): ("L", 1),
31 (1, 8): ("L", 1),
32 (2, 8): ("P", 1),
33 (3, 8): ("RGB", 3),
34 (4, 8): ("CMYK", 4),
35 (7, 8): ("L", 1), # FIXME: multilayer
36 (8, 8): ("L", 1), # duotone
37 (9, 8): ("LAB", 3),
38 }
39
40
41 # --------------------------------------------------------------------.
42 # read PSD images
43
44
45 def _accept(prefix):
46 return prefix[:4] == b"8BPS"
47
48
49 ##
50 # Image plugin for Photoshop images.
51
52
53 class PsdImageFile(ImageFile.ImageFile):
54
55 format = "PSD"
56 format_description = "Adobe Photoshop"
57 _close_exclusive_fp_after_loading = False
58
59 def _open(self):
60
61 read = self.fp.read
62
63 #
64 # header
65
66 s = read(26)
67 if not _accept(s) or i16(s, 4) != 1:
68 raise SyntaxError("not a PSD file")
69
70 psd_bits = i16(s, 22)
71 psd_channels = i16(s, 12)
72 psd_mode = i16(s, 24)
73
74 mode, channels = MODES[(psd_mode, psd_bits)]
75
76 if channels > psd_channels:
77 raise OSError("not enough channels")
78
79 self.mode = mode
80 self._size = i32(s, 18), i32(s, 14)
81
82 #
83 # color mode data
84
85 size = i32(read(4))
86 if size:
87 data = read(size)
88 if mode == "P" and size == 768:
89 self.palette = ImagePalette.raw("RGB;L", data)
90
91 #
92 # image resources
93
94 self.resources = []
95
96 size = i32(read(4))
97 if size:
98 # load resources
99 end = self.fp.tell() + size
100 while self.fp.tell() < end:
101 read(4) # signature
102 id = i16(read(2))
103 name = read(i8(read(1)))
104 if not (len(name) & 1):
105 read(1) # padding
106 data = read(i32(read(4)))
107 if len(data) & 1:
108 read(1) # padding
109 self.resources.append((id, name, data))
110 if id == 1039: # ICC profile
111 self.info["icc_profile"] = data
112
113 #
114 # layer and mask information
115
116 self.layers = []
117
118 size = i32(read(4))
119 if size:
120 end = self.fp.tell() + size
121 size = i32(read(4))
122 if size:
123 _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))
124 self.layers = _layerinfo(_layer_data, size)
125 self.fp.seek(end)
126 self.n_frames = len(self.layers)
127 self.is_animated = self.n_frames > 1
128
129 #
130 # image descriptor
131
132 self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
133
134 # keep the file open
135 self._fp = self.fp
136 self.frame = 1
137 self._min_frame = 1
138
139 def seek(self, layer):
140 if not self._seek_check(layer):
141 return
142
143 # seek to given layer (1..max)
144 try:
145 name, mode, bbox, tile = self.layers[layer - 1]
146 self.mode = mode
147 self.tile = tile
148 self.frame = layer
149 self.fp = self._fp
150 return name, bbox
151 except IndexError as e:
152 raise EOFError("no such layer") from e
153
154 def tell(self):
155 # return layer number (0=image, 1..max=layers)
156 return self.frame
157
158
159 def _layerinfo(fp, ct_bytes):
160 # read layerinfo block
161 layers = []
162
163 def read(size):
164 return ImageFile._safe_read(fp, size)
165
166 ct = si16(read(2))
167
168 # sanity check
169 if ct_bytes < (abs(ct) * 20):
170 raise SyntaxError("Layer block too short for number of layers requested")
171
172 for _ in range(abs(ct)):
173
174 # bounding box
175 y0 = i32(read(4))
176 x0 = i32(read(4))
177 y1 = i32(read(4))
178 x1 = i32(read(4))
179
180 # image info
181 mode = []
182 ct_types = i16(read(2))
183 types = list(range(ct_types))
184 if len(types) > 4:
185 continue
186
187 for _ in types:
188 type = i16(read(2))
189
190 if type == 65535:
191 m = "A"
192 else:
193 m = "RGBA"[type]
194
195 mode.append(m)
196 read(4) # size
197
198 # figure out the image mode
199 mode.sort()
200 if mode == ["R"]:
201 mode = "L"
202 elif mode == ["B", "G", "R"]:
203 mode = "RGB"
204 elif mode == ["A", "B", "G", "R"]:
205 mode = "RGBA"
206 else:
207 mode = None # unknown
208
209 # skip over blend flags and extra information
210 read(12) # filler
211 name = ""
212 size = i32(read(4)) # length of the extra data field
213 if size:
214 data_end = fp.tell() + size
215
216 length = i32(read(4))
217 if length:
218 fp.seek(length - 16, io.SEEK_CUR)
219
220 length = i32(read(4))
221 if length:
222 fp.seek(length, io.SEEK_CUR)
223
224 length = i8(read(1))
225 if length:
226 # Don't know the proper encoding,
227 # Latin-1 should be a good guess
228 name = read(length).decode("latin-1", "replace")
229
230 fp.seek(data_end)
231 layers.append((name, mode, (x0, y0, x1, y1)))
232
233 # get tiles
234 i = 0
235 for name, mode, bbox in layers:
236 tile = []
237 for m in mode:
238 t = _maketile(fp, m, bbox, 1)
239 if t:
240 tile.extend(t)
241 layers[i] = name, mode, bbox, tile
242 i += 1
243
244 return layers
245
246
247 def _maketile(file, mode, bbox, channels):
248
249 tile = None
250 read = file.read
251
252 compression = i16(read(2))
253
254 xsize = bbox[2] - bbox[0]
255 ysize = bbox[3] - bbox[1]
256
257 offset = file.tell()
258
259 if compression == 0:
260 #
261 # raw compression
262 tile = []
263 for channel in range(channels):
264 layer = mode[channel]
265 if mode == "CMYK":
266 layer += ";I"
267 tile.append(("raw", bbox, offset, layer))
268 offset = offset + xsize * ysize
269
270 elif compression == 1:
271 #
272 # packbits compression
273 i = 0
274 tile = []
275 bytecount = read(channels * ysize * 2)
276 offset = file.tell()
277 for channel in range(channels):
278 layer = mode[channel]
279 if mode == "CMYK":
280 layer += ";I"
281 tile.append(("packbits", bbox, offset, layer))
282 for y in range(ysize):
283 offset = offset + i16(bytecount, i)
284 i += 2
285
286 file.seek(offset)
287
288 if offset & 1:
289 read(1) # padding
290
291 return tile
292
293
294 # --------------------------------------------------------------------
295 # registry
296
297
298 Image.register_open(PsdImageFile.format, PsdImageFile, _accept)
299
300 Image.register_extension(PsdImageFile.format, ".psd")
301
302 Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop")
303
[end of src/PIL/PsdImagePlugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/PIL/PsdImagePlugin.py b/src/PIL/PsdImagePlugin.py
--- a/src/PIL/PsdImagePlugin.py
+++ b/src/PIL/PsdImagePlugin.py
@@ -75,6 +75,9 @@
if channels > psd_channels:
raise OSError("not enough channels")
+ if mode == "RGB" and psd_channels == 4:
+ mode = "RGBA"
+ channels = 4
self.mode = mode
self._size = i32(s, 18), i32(s, 14)
| {"golden_diff": "diff --git a/src/PIL/PsdImagePlugin.py b/src/PIL/PsdImagePlugin.py\n--- a/src/PIL/PsdImagePlugin.py\n+++ b/src/PIL/PsdImagePlugin.py\n@@ -75,6 +75,9 @@\n \n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n+ if mode == \"RGB\" and psd_channels == 4:\n+ mode = \"RGBA\"\n+ channels = 4\n \n self.mode = mode\n self._size = i32(s, 18), i32(s, 14)\n", "issue": "PSD incorrectly loaded\n### What did you do?\r\nI opened the TIFF in Pillow and converted it to JPG.\r\n### What did you expect to happen?\r\nThe JPG image to look the same as the original TIFF.\r\n### What actually happened?\r\nThe converted JPG looks malformed and has messed up colors.\r\n### What are your OS, Python and Pillow versions?\r\n\r\n* OS: Linux\r\n* Python: 3.10.5\r\n* Pillow: 9.1.1 (also tested -git)\r\n\r\n```python\r\n>>> img = Image.open(\"3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif\")\r\n>>> out_img = img.convert(\"RGB\")\r\n>>> out_img.save(\"converted.jpg\", quality=95)\r\n```\r\n[original image](https://api.collectie.gent/storage/v1/download/3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif) (beware, 274MB)\r\n[converted image](https://api.collectie.gent/storage/v1/download/3a029a4f48b480211286486a6a1f0f0b-transcode-OA_535_161_17_F_TE.jpg)\r\n\r\nIs it okay to report this here or should I report this to the appropriate library (libtiff, jpeg-turbo, ?)\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secret Labs AB.\n# Copyright (c) 1995-2001 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport io\n\nfrom . 
import Image, ImageFile, ImagePalette\nfrom ._binary import i8\nfrom ._binary import i16be as i16\nfrom ._binary import i32be as i32\nfrom ._binary import si16be as si16\n\nMODES = {\n # (photoshop mode, bits) -> (pil mode, required channels)\n (0, 1): (\"1\", 1),\n (0, 8): (\"L\", 1),\n (1, 8): (\"L\", 1),\n (2, 8): (\"P\", 1),\n (3, 8): (\"RGB\", 3),\n (4, 8): (\"CMYK\", 4),\n (7, 8): (\"L\", 1), # FIXME: multilayer\n (8, 8): (\"L\", 1), # duotone\n (9, 8): (\"LAB\", 3),\n}\n\n\n# --------------------------------------------------------------------.\n# read PSD images\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"8BPS\"\n\n\n##\n# Image plugin for Photoshop images.\n\n\nclass PsdImageFile(ImageFile.ImageFile):\n\n format = \"PSD\"\n format_description = \"Adobe Photoshop\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n\n read = self.fp.read\n\n #\n # header\n\n s = read(26)\n if not _accept(s) or i16(s, 4) != 1:\n raise SyntaxError(\"not a PSD file\")\n\n psd_bits = i16(s, 22)\n psd_channels = i16(s, 12)\n psd_mode = i16(s, 24)\n\n mode, channels = MODES[(psd_mode, psd_bits)]\n\n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n\n self.mode = mode\n self._size = i32(s, 18), i32(s, 14)\n\n #\n # color mode data\n\n size = i32(read(4))\n if size:\n data = read(size)\n if mode == \"P\" and size == 768:\n self.palette = ImagePalette.raw(\"RGB;L\", data)\n\n #\n # image resources\n\n self.resources = []\n\n size = i32(read(4))\n if size:\n # load resources\n end = self.fp.tell() + size\n while self.fp.tell() < end:\n read(4) # signature\n id = i16(read(2))\n name = read(i8(read(1)))\n if not (len(name) & 1):\n read(1) # padding\n data = read(i32(read(4)))\n if len(data) & 1:\n read(1) # padding\n self.resources.append((id, name, data))\n if id == 1039: # ICC profile\n self.info[\"icc_profile\"] = data\n\n #\n # layer and mask information\n\n self.layers = []\n\n size = i32(read(4))\n if size:\n end = self.fp.tell() + size\n size = i32(read(4))\n if size:\n _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))\n self.layers = _layerinfo(_layer_data, size)\n self.fp.seek(end)\n self.n_frames = len(self.layers)\n self.is_animated = self.n_frames > 1\n\n #\n # image descriptor\n\n self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)\n\n # keep the file open\n self._fp = self.fp\n self.frame = 1\n self._min_frame = 1\n\n def seek(self, layer):\n if not self._seek_check(layer):\n return\n\n # seek to given layer (1..max)\n try:\n name, mode, bbox, tile = self.layers[layer - 1]\n self.mode = mode\n self.tile = tile\n self.frame = layer\n self.fp = self._fp\n return name, bbox\n except IndexError as e:\n raise EOFError(\"no such layer\") from e\n\n def tell(self):\n # return layer number (0=image, 1..max=layers)\n return self.frame\n\n\ndef _layerinfo(fp, ct_bytes):\n # read layerinfo block\n layers = []\n\n def read(size):\n return ImageFile._safe_read(fp, size)\n\n ct = si16(read(2))\n\n # sanity check\n if ct_bytes < (abs(ct) * 20):\n raise SyntaxError(\"Layer block too short for number of layers requested\")\n\n for _ in range(abs(ct)):\n\n # bounding box\n y0 = i32(read(4))\n x0 = i32(read(4))\n y1 = i32(read(4))\n x1 = i32(read(4))\n\n # image info\n mode = []\n ct_types = i16(read(2))\n types = list(range(ct_types))\n if len(types) > 4:\n continue\n\n for _ in types:\n type = i16(read(2))\n\n if type == 65535:\n m = \"A\"\n else:\n m = \"RGBA\"[type]\n\n mode.append(m)\n read(4) # size\n\n # figure out the image mode\n 
mode.sort()\n if mode == [\"R\"]:\n mode = \"L\"\n elif mode == [\"B\", \"G\", \"R\"]:\n mode = \"RGB\"\n elif mode == [\"A\", \"B\", \"G\", \"R\"]:\n mode = \"RGBA\"\n else:\n mode = None # unknown\n\n # skip over blend flags and extra information\n read(12) # filler\n name = \"\"\n size = i32(read(4)) # length of the extra data field\n if size:\n data_end = fp.tell() + size\n\n length = i32(read(4))\n if length:\n fp.seek(length - 16, io.SEEK_CUR)\n\n length = i32(read(4))\n if length:\n fp.seek(length, io.SEEK_CUR)\n\n length = i8(read(1))\n if length:\n # Don't know the proper encoding,\n # Latin-1 should be a good guess\n name = read(length).decode(\"latin-1\", \"replace\")\n\n fp.seek(data_end)\n layers.append((name, mode, (x0, y0, x1, y1)))\n\n # get tiles\n i = 0\n for name, mode, bbox in layers:\n tile = []\n for m in mode:\n t = _maketile(fp, m, bbox, 1)\n if t:\n tile.extend(t)\n layers[i] = name, mode, bbox, tile\n i += 1\n\n return layers\n\n\ndef _maketile(file, mode, bbox, channels):\n\n tile = None\n read = file.read\n\n compression = i16(read(2))\n\n xsize = bbox[2] - bbox[0]\n ysize = bbox[3] - bbox[1]\n\n offset = file.tell()\n\n if compression == 0:\n #\n # raw compression\n tile = []\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"raw\", bbox, offset, layer))\n offset = offset + xsize * ysize\n\n elif compression == 1:\n #\n # packbits compression\n i = 0\n tile = []\n bytecount = read(channels * ysize * 2)\n offset = file.tell()\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"packbits\", bbox, offset, layer))\n for y in range(ysize):\n offset = offset + i16(bytecount, i)\n i += 2\n\n file.seek(offset)\n\n if offset & 1:\n read(1) # padding\n\n return tile\n\n\n# --------------------------------------------------------------------\n# registry\n\n\nImage.register_open(PsdImageFile.format, PsdImageFile, _accept)\n\nImage.register_extension(PsdImageFile.format, \".psd\")\n\nImage.register_mime(PsdImageFile.format, \"image/vnd.adobe.photoshop\")\n", "path": "src/PIL/PsdImagePlugin.py"}]} | 3,796 | 132 |
gh_patches_debug_34741 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] Add Teams specific telemetry properties
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/4256
Add Teams-specific telemetry properties when an activity is received via the Teams channel.
See also https://github.com/microsoft/botframework-sdk/issues/5855
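A rough sketch of the idea in plain Python (not the SDK's actual implementation — the `msteams` channel id and the tenant/team/AAD-object-id fields mirror the ported .NET change, but the exact shapes used below are assumptions):

```python
def add_teams_telemetry_properties(activity, properties: dict) -> None:
    # Only enrich telemetry for activities that arrived over the Teams channel.
    if getattr(activity, "channel_id", None) != "msteams":
        return

    channel_data = getattr(activity, "channel_data", None) or {}
    tenant = channel_data.get("tenant") or {}
    team = channel_data.get("team") or {}

    properties["TeamsTenantId"] = tenant.get("id", "")
    if team:
        properties["TeamsTeamInfo"] = team  # e.g. {"id": ..., "name": ...}

    sender = getattr(activity, "from_property", None)
    if sender is not None:
        properties["TeamsUserAadObjectId"] = getattr(sender, "aad_object_id", "")
```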
# Changed projects
* Microsoft.Bot.Builder
* Microsoft.Bot.Builder.Tests
</issue>
<code>
[start of libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 """Middleware Component for logging Activity messages."""
4
5 from typing import Awaitable, Callable, List, Dict
6 from botbuilder.schema import Activity, ConversationReference, ActivityTypes
7 from .bot_telemetry_client import BotTelemetryClient
8 from .bot_assert import BotAssert
9 from .middleware_set import Middleware
10 from .null_telemetry_client import NullTelemetryClient
11 from .turn_context import TurnContext
12 from .telemetry_constants import TelemetryConstants
13 from .telemetry_logger_constants import TelemetryLoggerConstants
14
15
16 # pylint: disable=line-too-long
17 class TelemetryLoggerMiddleware(Middleware):
18 """Middleware for logging incoming, outgoing, updated or deleted Activity messages."""
19
20 def __init__(
21 self, telemetry_client: BotTelemetryClient, log_personal_information: bool
22 ) -> None:
23 super(TelemetryLoggerMiddleware, self).__init__()
24 self._telemetry_client = telemetry_client or NullTelemetryClient()
25 self._log_personal_information = log_personal_information
26
27 @property
28 def telemetry_client(self) -> BotTelemetryClient:
29 """Gets the currently configured BotTelemetryClient."""
30 return self._telemetry_client
31
32 @property
33 def log_personal_information(self) -> bool:
34 """ Gets a value indicating whether determines whether to log personal
35 information that came from the user."""
36 return self._log_personal_information
37
38 # pylint: disable=arguments-differ
39 async def on_turn(
40 self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]
41 ) -> None:
42 """Logs events based on incoming and outgoing activities using
43 BotTelemetryClient base class
44
45 :param turn_context: The context object for this turn.
46 :param logic: Callable to continue the bot middleware pipeline
47
48 :return: None
49 """
50 BotAssert.context_not_none(context)
51
52 # Log incoming activity at beginning of turn
53 if context.activity:
54 activity = context.activity
55 # Log Bot Message Received
56 await self.on_receive_activity(activity)
57
58 # hook up onSend pipeline
59 # pylint: disable=unused-argument
60 async def send_activities_handler(
61 ctx: TurnContext,
62 activities: List[Activity],
63 next_send: Callable[[], Awaitable[None]],
64 ):
65 # Run full pipeline
66 responses = await next_send()
67 for activity in activities:
68 await self.on_send_activity(activity)
69 return responses
70
71 context.on_send_activities(send_activities_handler)
72
73 # hook up update activity pipeline
74 async def update_activity_handler(
75 ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]
76 ):
77 # Run full pipeline
78 response = await next_update()
79 await self.on_update_activity(activity)
80 return response
81
82 context.on_update_activity(update_activity_handler)
83
84 # hook up delete activity pipeline
85 async def delete_activity_handler(
86 ctx: TurnContext,
87 reference: ConversationReference,
88 next_delete: Callable[[], Awaitable],
89 ):
90 # Run full pipeline
91 await next_delete()
92
93 delete_msg = Activity(
94 type=ActivityTypes.message_delete, id=reference.activity_id
95 )
96 deleted_activity: Activity = TurnContext.apply_conversation_reference(
97 delete_msg, reference, False
98 )
99 await self.on_delete_activity(deleted_activity)
100
101 context.on_delete_activity(delete_activity_handler)
102
103 if logic_fn:
104 await logic_fn()
105
106 async def on_receive_activity(self, activity: Activity) -> None:
107 """Invoked when a message is received from the user.
108 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
109 This event name used is "BotMessageReceived".
110 :param activity: Current activity sent from user.
111 """
112 self.telemetry_client.track_event(
113 TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,
114 await self.fill_receive_event_properties(activity),
115 )
116
117 async def on_send_activity(self, activity: Activity) -> None:
118 """Invoked when the bot sends a message to the user.
119 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
120 This event name used is "BotMessageSend".
121 :param activity: Current activity sent from bot.
122 """
123 self.telemetry_client.track_event(
124 TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,
125 await self.fill_send_event_properties(activity),
126 )
127
128 async def on_update_activity(self, activity: Activity) -> None:
129 """Invoked when the bot updates a message.
130 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
131 This event name used is "BotMessageUpdate".
132 :param activity: Current activity sent from user.
133 """
134 self.telemetry_client.track_event(
135 TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,
136 await self.fill_update_event_properties(activity),
137 )
138
139 async def on_delete_activity(self, activity: Activity) -> None:
140 """Invoked when the bot deletes a message.
141 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
142 This event name used is "BotMessageDelete".
143 :param activity: Current activity sent from user.
144 """
145 self.telemetry_client.track_event(
146 TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,
147 await self.fill_delete_event_properties(activity),
148 )
149
150 async def fill_receive_event_properties(
151 self, activity: Activity, additional_properties: Dict[str, str] = None
152 ) -> Dict[str, str]:
153 """Fills the event properties for the BotMessageReceived.
154 Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.
155 :param activity: activity sent from user.
156 :param additional_properties: Additional properties to add to the event.
157 Additional properties can override "stock" properties.
158
159 :return: A dictionary that is sent as "Properties" to
160 BotTelemetryClient.track_event method for the BotMessageReceived event.
161 """
162 properties = {
163 TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id
164 if activity.from_property
165 else None,
166 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
167 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
168 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
169 TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,
170 }
171
172 if self.log_personal_information:
173 if (
174 activity.from_property
175 and activity.from_property.name
176 and activity.from_property.name.strip()
177 ):
178 properties[
179 TelemetryConstants.FROM_NAME_PROPERTY
180 ] = activity.from_property.name
181 if activity.text and activity.text.strip():
182 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
183 if activity.speak and activity.speak.strip():
184 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
185
186 # Additional properties can override "stock" properties
187 if additional_properties:
188 for prop in additional_properties:
189 properties[prop.key] = prop.value
190
191 return properties
192
193 async def fill_send_event_properties(
194 self, activity: Activity, additional_properties: Dict[str, str] = None
195 ) -> Dict[str, str]:
196 """Fills the event properties for the BotMessageSend.
197 These properties are logged when an activity message is sent by the Bot to the user.
198 :param activity: activity sent from user.
199 :param additional_properties: Additional properties to add to the event.
200 Additional properties can override "stock" properties.
201
202 :return: A dictionary that is sent as "Properties" to the
203 BotTelemetryClient.track_event method for the BotMessageSend event.
204 """
205 properties = {
206 TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,
207 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
208 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
209 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
210 }
211
212 # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples
213 if self.log_personal_information:
214 if activity.attachments and activity.attachments.strip():
215 properties[
216 TelemetryConstants.ATTACHMENTS_PROPERTY
217 ] = activity.attachments
218 if activity.from_property.name and activity.from_property.name.strip():
219 properties[
220 TelemetryConstants.FROM_NAME_PROPERTY
221 ] = activity.from_property.name
222 if activity.text and activity.text.strip():
223 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
224 if activity.speak and activity.speak.strip():
225 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
226
227 # Additional properties can override "stock" properties
228 if additional_properties:
229 for prop in additional_properties:
230 properties[prop.key] = prop.value
231
232 return properties
233
234 async def fill_update_event_properties(
235 self, activity: Activity, additional_properties: Dict[str, str] = None
236 ) -> Dict[str, str]:
237 """Fills the event properties for the BotMessageUpdate.
238 These properties are logged when an activity message is updated by the Bot.
239 For example, if a card is interacted with by the use, and the card needs
240 to be updated to reflect some interaction.
241 :param activity: activity sent from user.
242 :param additional_properties: Additional properties to add to the event.
243 Additional properties can override "stock" properties.
244
245 :return: A dictionary that is sent as "Properties" to the
246 BotTelemetryClient.track_event method for the BotMessageUpdate event.
247 """
248 properties = {
249 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
250 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
251 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
252 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
253 }
254
255 # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples
256 if self.log_personal_information:
257 if activity.text and activity.text.strip():
258 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
259
260 # Additional properties can override "stock" properties
261 if additional_properties:
262 for prop in additional_properties:
263 properties[prop.key] = prop.value
264
265 return properties
266
267 async def fill_delete_event_properties(
268 self, activity: Activity, additional_properties: Dict[str, str] = None
269 ) -> Dict[str, str]:
270 """Fills the event properties for the BotMessageDelete.
271 These properties are logged when an activity message is deleted by the Bot.
272 :param activity: activity sent from user.
273 :param additional_properties: Additional properties to add to the event.
274 Additional properties can override "stock" properties.
275
276 :return: A dictionary that is sent as "Properties" to the
277 BotTelemetryClient.track_event method for the BotMessageUpdate event.
278 """
279 properties = {
280 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
281 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
282 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
283 }
284
285 # Additional properties can override "stock" properties
286 if additional_properties:
287 for prop in additional_properties:
288 properties[prop.key] = prop.value
289
290 return properties
291
[end of libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
@@ -1,9 +1,11 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Middleware Component for logging Activity messages."""
-
from typing import Awaitable, Callable, List, Dict
from botbuilder.schema import Activity, ConversationReference, ActivityTypes
+from botbuilder.schema.teams import TeamsChannelData, TeamInfo
+from botframework.connector import Channels
+
from .bot_telemetry_client import BotTelemetryClient
from .bot_assert import BotAssert
from .middleware_set import Middleware
@@ -183,6 +185,10 @@
if activity.speak and activity.speak.strip():
properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
+ TelemetryLoggerMiddleware.__populate_additional_channel_properties(
+ activity, properties
+ )
+
# Additional properties can override "stock" properties
if additional_properties:
for prop in additional_properties:
@@ -288,3 +294,25 @@
properties[prop.key] = prop.value
return properties
+
+ @staticmethod
+ def __populate_additional_channel_properties(
+ activity: Activity, properties: dict,
+ ):
+ if activity.channel_id == Channels.ms_teams:
+ teams_channel_data: TeamsChannelData = activity.channel_data
+
+ properties["TeamsTenantId"] = (
+ teams_channel_data.tenant
+ if teams_channel_data and teams_channel_data.tenant
+ else ""
+ )
+
+ properties["TeamsUserAadObjectId"] = (
+ activity.from_property.aad_object_id if activity.from_property else ""
+ )
+
+ if teams_channel_data and teams_channel_data.team:
+ properties["TeamsTeamInfo"] = TeamInfo.serialize(
+ teams_channel_data.team
+ )
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n@@ -1,9 +1,11 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n \"\"\"Middleware Component for logging Activity messages.\"\"\"\n-\n from typing import Awaitable, Callable, List, Dict\n from botbuilder.schema import Activity, ConversationReference, ActivityTypes\n+from botbuilder.schema.teams import TeamsChannelData, TeamInfo\n+from botframework.connector import Channels\n+\n from .bot_telemetry_client import BotTelemetryClient\n from .bot_assert import BotAssert\n from .middleware_set import Middleware\n@@ -183,6 +185,10 @@\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n \n+ TelemetryLoggerMiddleware.__populate_additional_channel_properties(\n+ activity, properties\n+ )\n+\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n@@ -288,3 +294,25 @@\n properties[prop.key] = prop.value\n \n return properties\n+\n+ @staticmethod\n+ def __populate_additional_channel_properties(\n+ activity: Activity, properties: dict,\n+ ):\n+ if activity.channel_id == Channels.ms_teams:\n+ teams_channel_data: TeamsChannelData = activity.channel_data\n+\n+ properties[\"TeamsTenantId\"] = (\n+ teams_channel_data.tenant\n+ if teams_channel_data and teams_channel_data.tenant\n+ else \"\"\n+ )\n+\n+ properties[\"TeamsUserAadObjectId\"] = (\n+ activity.from_property.aad_object_id if activity.from_property else \"\"\n+ )\n+\n+ if teams_channel_data and teams_channel_data.team:\n+ properties[\"TeamsTeamInfo\"] = TeamInfo.serialize(\n+ teams_channel_data.team\n+ )\n", "issue": "[PORT] Add Teams specific telemetry properties \n> Port this change from botbuilder-dotnet/master branch:\r\nhttps://github.com/microsoft/botbuilder-dotnet/pull/4256\r\n\r\nAdd Teams specific telemetry properties when activity received via the Teams channel.\r\n\r\nSee also https://github.com/microsoft/botframework-sdk/issues/5855\r\n\r\n\r\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder\r\n* Microsoft.Bot.Builder.Tests\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Middleware Component for logging Activity messages.\"\"\"\n\nfrom typing import Awaitable, Callable, List, Dict\nfrom botbuilder.schema import Activity, ConversationReference, ActivityTypes\nfrom .bot_telemetry_client import BotTelemetryClient\nfrom .bot_assert import BotAssert\nfrom .middleware_set import Middleware\nfrom .null_telemetry_client import NullTelemetryClient\nfrom .turn_context import TurnContext\nfrom .telemetry_constants import TelemetryConstants\nfrom .telemetry_logger_constants import TelemetryLoggerConstants\n\n\n# pylint: disable=line-too-long\nclass TelemetryLoggerMiddleware(Middleware):\n \"\"\"Middleware for logging incoming, outgoing, updated or deleted Activity messages.\"\"\"\n\n def __init__(\n self, telemetry_client: BotTelemetryClient, log_personal_information: bool\n ) -> None:\n super(TelemetryLoggerMiddleware, self).__init__()\n self._telemetry_client = telemetry_client or NullTelemetryClient()\n self._log_personal_information = log_personal_information\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"Gets the currently configured BotTelemetryClient.\"\"\"\n return self._telemetry_client\n\n @property\n def log_personal_information(self) -> bool:\n \"\"\" Gets a value indicating whether determines whether to log personal\n information that came from the user.\"\"\"\n return self._log_personal_information\n\n # pylint: disable=arguments-differ\n async def on_turn(\n self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]\n ) -> None:\n \"\"\"Logs events based on incoming and outgoing activities using\n BotTelemetryClient base class\n\n :param turn_context: The context object for this turn.\n :param logic: Callable to continue the bot middleware pipeline\n\n :return: None\n \"\"\"\n BotAssert.context_not_none(context)\n\n # Log incoming activity at beginning of turn\n if context.activity:\n activity = context.activity\n # Log Bot Message Received\n await self.on_receive_activity(activity)\n\n # hook up onSend pipeline\n # pylint: disable=unused-argument\n async def send_activities_handler(\n ctx: TurnContext,\n activities: List[Activity],\n next_send: Callable[[], Awaitable[None]],\n ):\n # Run full pipeline\n responses = await next_send()\n for activity in activities:\n await self.on_send_activity(activity)\n return responses\n\n context.on_send_activities(send_activities_handler)\n\n # hook up update activity pipeline\n async def update_activity_handler(\n ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]\n ):\n # Run full pipeline\n response = await next_update()\n await self.on_update_activity(activity)\n return response\n\n context.on_update_activity(update_activity_handler)\n\n # hook up delete activity pipeline\n async def delete_activity_handler(\n ctx: TurnContext,\n reference: ConversationReference,\n next_delete: Callable[[], Awaitable],\n ):\n # Run full pipeline\n await next_delete()\n\n delete_msg = Activity(\n type=ActivityTypes.message_delete, id=reference.activity_id\n )\n deleted_activity: Activity = TurnContext.apply_conversation_reference(\n delete_msg, reference, False\n )\n await self.on_delete_activity(deleted_activity)\n\n context.on_delete_activity(delete_activity_handler)\n\n if logic_fn:\n await logic_fn()\n\n async def on_receive_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when a message is received from the user.\n Performs logging of telemetry data using the 
BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageReceived\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,\n await self.fill_receive_event_properties(activity),\n )\n\n async def on_send_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot sends a message to the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageSend\".\n :param activity: Current activity sent from bot.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,\n await self.fill_send_event_properties(activity),\n )\n\n async def on_update_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot updates a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageUpdate\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,\n await self.fill_update_event_properties(activity),\n )\n\n async def on_delete_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot deletes a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageDelete\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,\n await self.fill_delete_event_properties(activity),\n )\n\n async def fill_receive_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageReceived.\n Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to\n BotTelemetryClient.track_event method for the BotMessageReceived event.\n \"\"\"\n properties = {\n TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id\n if activity.from_property\n else None,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,\n }\n\n if self.log_personal_information:\n if (\n activity.from_property\n and activity.from_property.name\n and activity.from_property.name.strip()\n ):\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_send_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageSend.\n These properties are logged when 
an activity message is sent by the Bot to the user.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageSend event.\n \"\"\"\n properties = {\n TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples\n if self.log_personal_information:\n if activity.attachments and activity.attachments.strip():\n properties[\n TelemetryConstants.ATTACHMENTS_PROPERTY\n ] = activity.attachments\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_update_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageUpdate.\n These properties are logged when an activity message is updated by the Bot.\n For example, if a card is interacted with by the use, and the card needs\n to be updated to reflect some interaction.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples\n if self.log_personal_information:\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_delete_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageDelete.\n These properties are logged when an activity message is deleted by the Bot.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate 
event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n }\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n", "path": "libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py"}]} | 3,826 | 470 |
gh_patches_debug_7326 | rasdani/github-patches | git_diff | microsoft__ptvsd-619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Launching a Python module using `-m` with `ptvsd.enable_attach` in `__init__.py` fails
## Environment data
- PTVSD version: Master
- OS and version: Mac (may not be OS specific)
- Python version (& distribution if applicable, e.g. Anaconda): 3.6
- Using VS Code or Visual Studio: N/A
## Actual behavior
```
Traceback (most recent call last):
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/tests/resources/system_tests/test_exceptions/mymod_attach1/__init__.py", line 4, in <module>
ptvsd.enable_attach((sys.argv[1], sys.argv[2]))
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/attach_server.py", line 71, in enable_attach
redirect_output=redirect_output,
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/_remote.py", line 27, in enable_attach
**kwargs
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd_hooks.py", line 95, in install
if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:
AttributeError: module '__main__' has no attribute '__file__'
```
## Expected behavior
Should not error out
## Steps to reproduce:
* Create a Python package
* Add the following code to its `__init__.py`:
```python
import sys
import ptvsd
ptvsd.enable_attach(('localhost', 9876))
ptvsd.wait_for_attach()
try:
raise ArithmeticError('Hello')
except Exception:
pass
sys.stdout.write('end')
```
* Start the module with `python -m xyz`
That's when it fails with the `AttributeError` shown above.
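For context, the failure comes down to reading `__main__.__file__` unconditionally; a minimal standalone sketch of the defensive lookup that avoids it (not ptvsd's actual code) looks like this:

```python
import sys


def main_module_is(pydevd_file: str) -> bool:
    main_module = sys.modules["__main__"]
    # With "python -m pkg", pkg/__init__.py runs while __main__ has not been
    # given a __file__ attribute yet, so it must be read with a default
    # rather than accessed directly.
    main_file = getattr(main_module, "__file__", None)
    return main_file == pydevd_file
```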
</issue>
<code>
[start of ptvsd/pydevd_hooks.py]
1 import sys
2
3 from _pydevd_bundle import pydevd_comm
4
5 from ptvsd.socket import Address
6 from ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError
7 from ptvsd._util import debug, new_hidden_thread
8
9
10 def start_server(daemon, host, port, **kwargs):
11 """Return a socket to a (new) local pydevd-handling daemon.
12
13 The daemon supports the pydevd client wire protocol, sending
14 requests and handling responses (and events).
15
16 This is a replacement for _pydevd_bundle.pydevd_comm.start_server.
17 """
18 sock, next_session = daemon.start_server((host, port))
19
20 def handle_next():
21 try:
22 session = next_session(**kwargs)
23 debug('done waiting')
24 return session
25 except (DaemonClosedError, DaemonStoppedError):
26 # Typically won't happen.
27 debug('stopped')
28 raise
29 except Exception as exc:
30 # TODO: log this?
31 debug('failed:', exc, tb=True)
32 return None
33
34 while True:
35 debug('waiting on initial connection')
36 handle_next()
37 break
38
39 def serve_forever():
40 while True:
41 debug('waiting on next connection')
42 try:
43 handle_next()
44 except (DaemonClosedError, DaemonStoppedError):
45 break
46 debug('done')
47
48 t = new_hidden_thread(
49 target=serve_forever,
50 name='sessions',
51 )
52 t.start()
53 return sock
54
55
56 def start_client(daemon, host, port, **kwargs):
57 """Return a socket to an existing "remote" pydevd-handling daemon.
58
59 The daemon supports the pydevd client wire protocol, sending
60 requests and handling responses (and events).
61
62 This is a replacement for _pydevd_bundle.pydevd_comm.start_client.
63 """
64 sock, start_session = daemon.start_client((host, port))
65 start_session(**kwargs)
66 return sock
67
68
69 def install(pydevd, address,
70 start_server=start_server, start_client=start_client,
71 **kwargs):
72 """Configure pydevd to use our wrapper.
73
74 This is a bit of a hack to allow us to run our VSC debug adapter
75 in the same process as pydevd. Note that, as with most hacks,
76 this is somewhat fragile (since the monkeypatching sites may
77 change).
78 """
79 addr = Address.from_raw(address)
80 daemon = Daemon(**kwargs)
81
82 _start_server = (lambda p: start_server(daemon, addr.host, p))
83 _start_server.orig = start_server
84 _start_client = (lambda h, p: start_client(daemon, h, p))
85 _start_client.orig = start_client
86
87 # These are the functions pydevd invokes to get a socket to the client.
88 pydevd_comm.start_server = _start_server
89 pydevd_comm.start_client = _start_client
90
91 # Ensure that pydevd is using our functions.
92 pydevd.start_server = _start_server
93 pydevd.start_client = _start_client
94 __main__ = sys.modules['__main__']
95 if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:
96 __main__.start_server = _start_server
97 __main__.start_client = _start_client
98 return daemon
99
[end of ptvsd/pydevd_hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ptvsd/pydevd_hooks.py b/ptvsd/pydevd_hooks.py
--- a/ptvsd/pydevd_hooks.py
+++ b/ptvsd/pydevd_hooks.py
@@ -92,7 +92,8 @@
pydevd.start_server = _start_server
pydevd.start_client = _start_client
__main__ = sys.modules['__main__']
- if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:
- __main__.start_server = _start_server
- __main__.start_client = _start_client
+ if __main__ is not pydevd:
+ if getattr(__main__, '__file__', None) == pydevd.__file__:
+ __main__.start_server = _start_server
+ __main__.start_client = _start_client
return daemon
| {"golden_diff": "diff --git a/ptvsd/pydevd_hooks.py b/ptvsd/pydevd_hooks.py\n--- a/ptvsd/pydevd_hooks.py\n+++ b/ptvsd/pydevd_hooks.py\n@@ -92,7 +92,8 @@\n pydevd.start_server = _start_server\n pydevd.start_client = _start_client\n __main__ = sys.modules['__main__']\n- if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:\n- __main__.start_server = _start_server\n- __main__.start_client = _start_client\n+ if __main__ is not pydevd:\n+ if getattr(__main__, '__file__', None) == pydevd.__file__:\n+ __main__.start_server = _start_server\n+ __main__.start_client = _start_client\n return daemon\n", "issue": "Launching a Python module using `-m` with `ptvsd.enable_attach` in __init__ fails\n## Environment data\r\n\r\n- PTVSD version: Master\r\n- OS and version: Mac (may not be OS specific)\r\n- Python version (& distribution if applicable, e.g. Anaconda): 3.6\r\n- Using VS Code or Visual Studio:N/A\r\n\r\n## Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/tests/resources/system_tests/test_exceptions/mymod_attach1/__init__.py\", line 4, in <module>\r\n ptvsd.enable_attach((sys.argv[1], sys.argv[2]))\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/attach_server.py\", line 71, in enable_attach\r\n redirect_output=redirect_output,\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/_remote.py\", line 27, in enable_attach\r\n **kwargs\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd_hooks.py\", line 95, in install\r\n if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:\r\nAttributeError: module '__main__' has no attribute '__file__'\r\n```\r\n\r\n## Expected behavior\r\n\r\nShould not error out\r\n\r\n## Steps to reproduce:\r\n\r\n* Create a python module\r\n* Add the following code in the `__init__.py`\r\n```python\r\nimport sys\r\nimport ptvsd\r\nptvsd.enable_attach(('localhost', 9876))\r\nptvsd.wait_for_attach()\r\n\r\ntry:\r\n raise ArithmeticError('Hello')\r\nexcept Exception:\r\n pass\r\nsys.stdout.write('end')\r\n```\r\n* Start the module `python -m xyz`\r\nThat's when it goes kaboom\n", "before_files": [{"content": "import sys\n\nfrom _pydevd_bundle import pydevd_comm\n\nfrom ptvsd.socket import Address\nfrom ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError\nfrom ptvsd._util import debug, new_hidden_thread\n\n\ndef start_server(daemon, host, port, **kwargs):\n \"\"\"Return a socket to a (new) local pydevd-handling daemon.\n\n The daemon supports the pydevd client wire protocol, sending\n requests and handling responses (and events).\n\n This is a replacement for _pydevd_bundle.pydevd_comm.start_server.\n \"\"\"\n sock, next_session = daemon.start_server((host, port))\n\n def handle_next():\n try:\n session = next_session(**kwargs)\n debug('done waiting')\n return session\n except (DaemonClosedError, DaemonStoppedError):\n # Typically won't happen.\n debug('stopped')\n raise\n except Exception as exc:\n # TODO: log this?\n debug('failed:', exc, tb=True)\n return None\n\n while True:\n debug('waiting on initial connection')\n handle_next()\n break\n\n def serve_forever():\n while True:\n debug('waiting on next connection')\n try:\n handle_next()\n except (DaemonClosedError, DaemonStoppedError):\n break\n debug('done')\n\n t = new_hidden_thread(\n target=serve_forever,\n name='sessions',\n )\n t.start()\n return sock\n\n\ndef start_client(daemon, host, port, **kwargs):\n \"\"\"Return a socket to an 
existing \"remote\" pydevd-handling daemon.\n\n The daemon supports the pydevd client wire protocol, sending\n requests and handling responses (and events).\n\n This is a replacement for _pydevd_bundle.pydevd_comm.start_client.\n \"\"\"\n sock, start_session = daemon.start_client((host, port))\n start_session(**kwargs)\n return sock\n\n\ndef install(pydevd, address,\n start_server=start_server, start_client=start_client,\n **kwargs):\n \"\"\"Configure pydevd to use our wrapper.\n\n This is a bit of a hack to allow us to run our VSC debug adapter\n in the same process as pydevd. Note that, as with most hacks,\n this is somewhat fragile (since the monkeypatching sites may\n change).\n \"\"\"\n addr = Address.from_raw(address)\n daemon = Daemon(**kwargs)\n\n _start_server = (lambda p: start_server(daemon, addr.host, p))\n _start_server.orig = start_server\n _start_client = (lambda h, p: start_client(daemon, h, p))\n _start_client.orig = start_client\n\n # These are the functions pydevd invokes to get a socket to the client.\n pydevd_comm.start_server = _start_server\n pydevd_comm.start_client = _start_client\n\n # Ensure that pydevd is using our functions.\n pydevd.start_server = _start_server\n pydevd.start_client = _start_client\n __main__ = sys.modules['__main__']\n if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:\n __main__.start_server = _start_server\n __main__.start_client = _start_client\n return daemon\n", "path": "ptvsd/pydevd_hooks.py"}]} | 1,879 | 202 |
gh_patches_debug_26783 | rasdani/github-patches | git_diff | bridgecrewio__checkov-748 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive for check 'MSKClusterEncryption' (CKV_AWS_81)
**Describe the bug**
CKV_AWS_81 is reporting MSK clusters as unencrypted at rest while they are encrypted at rest.
**To Reproduce**
Example Terraform code producing a cluster with encryption (it will use the default KMS key as documented in https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn)
(slightly adapted from example on the msk_cluster resource docs)
<details>
<summary>Code</summary>
```
resource "aws_vpc" "vpc" {
cidr_block = "192.168.0.0/22"
}
data "aws_availability_zones" "azs" {
state = "available"
}
resource "aws_subnet" "subnet_az1" {
availability_zone = data.aws_availability_zones.azs.names[0]
cidr_block = "192.168.0.0/24"
vpc_id = aws_vpc.vpc.id
}
resource "aws_subnet" "subnet_az2" {
availability_zone = data.aws_availability_zones.azs.names[1]
cidr_block = "192.168.1.0/24"
vpc_id = aws_vpc.vpc.id
}
resource "aws_subnet" "subnet_az3" {
availability_zone = data.aws_availability_zones.azs.names[2]
cidr_block = "192.168.2.0/24"
vpc_id = aws_vpc.vpc.id
}
resource "aws_security_group" "sg" {
vpc_id = aws_vpc.vpc.id
}
resource "aws_kms_key" "kms" {
description = "example"
}
resource "aws_cloudwatch_log_group" "test" {
name = "msk_broker_logs"
}
resource "aws_s3_bucket" "bucket" {
bucket = "msk-broker-logs-bucket"
acl = "private"
}
resource "aws_iam_role" "firehose_role" {
name = "firehose_test_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
name = "terraform-kinesis-firehose-msk-broker-logs-stream"
destination = "s3"
s3_configuration {
role_arn = aws_iam_role.firehose_role.arn
bucket_arn = aws_s3_bucket.bucket.arn
}
tags = {
LogDeliveryEnabled = "placeholder"
}
lifecycle {
ignore_changes = [
tags["LogDeliveryEnabled"],
]
}
}
resource "aws_msk_cluster" "example" {
cluster_name = "example"
kafka_version = "2.4.1"
number_of_broker_nodes = 3
broker_node_group_info {
instance_type = "kafka.m5.large"
ebs_volume_size = 1000
client_subnets = [
aws_subnet.subnet_az1.id,
aws_subnet.subnet_az2.id,
aws_subnet.subnet_az3.id,
]
security_groups = [aws_security_group.sg.id]
}
encryption_info {
encryption_in_transit {
client_broker = "TLS"
in_cluster = true
}
}
open_monitoring {
prometheus {
jmx_exporter {
enabled_in_broker = true
}
node_exporter {
enabled_in_broker = true
}
}
}
logging_info {
broker_logs {
cloudwatch_logs {
enabled = true
log_group = aws_cloudwatch_log_group.test.name
}
firehose {
enabled = true
delivery_stream = aws_kinesis_firehose_delivery_stream.test_stream.name
}
s3 {
enabled = true
bucket = aws_s3_bucket.bucket.id
prefix = "logs/msk-"
}
}
}
tags = {
foo = "bar"
}
}
output "zookeeper_connect_string" {
value = aws_msk_cluster.example.zookeeper_connect_string
}
output "bootstrap_brokers_tls" {
description = "TLS connection host:port pairs"
value = aws_msk_cluster.example.bootstrap_brokers_tls
}
```
</details>
**Expected behavior**
No error is thrown as long as an encryption_info block is defined
Checkov Version 1.0.669
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/MSKClusterEncryption.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class MSKClusterEncryption(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure MSK Cluster encryption in rest and transit is enabled"
8 id = "CKV_AWS_81"
9 supported_resources = ['aws_msk_cluster']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if 'encryption_info' in conf.keys():
15 encryption = conf['encryption_info'][0]
16 if 'encryption_at_rest_kms_key_arn' in encryption:
17 if 'encryption_in_transit' in encryption:
18 transit = encryption['encryption_in_transit'][0]
19 if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \
20 'in_cluster' in transit and transit['in_cluster'][0] is False:
21 return CheckResult.FAILED
22 return CheckResult.PASSED
23 return CheckResult.PASSED
24 return CheckResult.FAILED
25
26
27 check = MSKClusterEncryption()
28
[end of checkov/terraform/checks/resource/aws/MSKClusterEncryption.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py
--- a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py
+++ b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py
@@ -11,16 +11,18 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
+ # Note: As long as the 'encryption_info' block is specified, the cluster
+ # will be encrypted at rest even if 'encryption_at_rest_kms_key_arn' is not specified
+ # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn
if 'encryption_info' in conf.keys():
encryption = conf['encryption_info'][0]
- if 'encryption_at_rest_kms_key_arn' in encryption:
- if 'encryption_in_transit' in encryption:
- transit = encryption['encryption_in_transit'][0]
- if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \
- 'in_cluster' in transit and transit['in_cluster'][0] is False:
- return CheckResult.FAILED
- return CheckResult.PASSED
+ if 'encryption_in_transit' in encryption:
+ transit = encryption['encryption_in_transit'][0]
+ if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \
+ 'in_cluster' in transit and transit['in_cluster'][0] is False:
+ return CheckResult.FAILED
return CheckResult.PASSED
+ return CheckResult.PASSED
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py\n--- a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py\n+++ b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py\n@@ -11,16 +11,18 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf):\n+ # Note: As long as the 'encryption_info' block is specified, the cluster\n+ # will be encrypted at rest even if 'encryption_at_rest_kms_key_arn' is not specified\n+ # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn\n if 'encryption_info' in conf.keys():\n encryption = conf['encryption_info'][0]\n- if 'encryption_at_rest_kms_key_arn' in encryption:\n- if 'encryption_in_transit' in encryption:\n- transit = encryption['encryption_in_transit'][0]\n- if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \\\n- 'in_cluster' in transit and transit['in_cluster'][0] is False:\n- return CheckResult.FAILED\n- return CheckResult.PASSED\n+ if 'encryption_in_transit' in encryption:\n+ transit = encryption['encryption_in_transit'][0]\n+ if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \\\n+ 'in_cluster' in transit and transit['in_cluster'][0] is False:\n+ return CheckResult.FAILED\n return CheckResult.PASSED\n+ return CheckResult.PASSED\n return CheckResult.FAILED\n", "issue": "False positive for check 'MSKClusterEncryption' (CKV_AWS_81)\n**Describe the bug**\r\nCKV_AWS_81 is reporting MSK clusters as unencrypted at rest while they are encrypted at rest.\r\n\r\n**To Reproduce**\r\nExample Terraform code producing a cluster with encryption (it will use the default KMS key as documented in https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn)\r\n\r\n(slightly adapted from example on the msk_cluster resource docs)\r\n<details>\r\n <summary>Code</summary>\r\n\r\n```\r\nresource \"aws_vpc\" \"vpc\" {\r\n cidr_block = \"192.168.0.0/22\"\r\n}\r\n\r\ndata \"aws_availability_zones\" \"azs\" {\r\n state = \"available\"\r\n}\r\n\r\nresource \"aws_subnet\" \"subnet_az1\" {\r\n availability_zone = data.aws_availability_zones.azs.names[0]\r\n cidr_block = \"192.168.0.0/24\"\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_subnet\" \"subnet_az2\" {\r\n availability_zone = data.aws_availability_zones.azs.names[1]\r\n cidr_block = \"192.168.1.0/24\"\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_subnet\" \"subnet_az3\" {\r\n availability_zone = data.aws_availability_zones.azs.names[2]\r\n cidr_block = \"192.168.2.0/24\"\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_security_group\" \"sg\" {\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_kms_key\" \"kms\" {\r\n description = \"example\"\r\n}\r\n\r\nresource \"aws_cloudwatch_log_group\" \"test\" {\r\n name = \"msk_broker_logs\"\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"bucket\" {\r\n bucket = \"msk-broker-logs-bucket\"\r\n acl = \"private\"\r\n}\r\n\r\nresource \"aws_iam_role\" \"firehose_role\" {\r\n name = \"firehose_test_role\"\r\n\r\n assume_role_policy = <<EOF\r\n{\r\n\"Version\": \"2012-10-17\",\r\n\"Statement\": [\r\n {\r\n \"Action\": \"sts:AssumeRole\",\r\n \"Principal\": {\r\n \"Service\": \"firehose.amazonaws.com\"\r\n },\r\n \"Effect\": \"Allow\",\r\n \"Sid\": \"\"\r\n }\r\n 
]\r\n}\r\nEOF\r\n}\r\n\r\nresource \"aws_kinesis_firehose_delivery_stream\" \"test_stream\" {\r\n name = \"terraform-kinesis-firehose-msk-broker-logs-stream\"\r\n destination = \"s3\"\r\n\r\n s3_configuration {\r\n role_arn = aws_iam_role.firehose_role.arn\r\n bucket_arn = aws_s3_bucket.bucket.arn\r\n }\r\n\r\n tags = {\r\n LogDeliveryEnabled = \"placeholder\"\r\n }\r\n\r\n lifecycle {\r\n ignore_changes = [\r\n tags[\"LogDeliveryEnabled\"],\r\n ]\r\n }\r\n}\r\n\r\nresource \"aws_msk_cluster\" \"example\" {\r\n cluster_name = \"example\"\r\n kafka_version = \"2.4.1\"\r\n number_of_broker_nodes = 3\r\n\r\n broker_node_group_info {\r\n instance_type = \"kafka.m5.large\"\r\n ebs_volume_size = 1000\r\n client_subnets = [\r\n aws_subnet.subnet_az1.id,\r\n aws_subnet.subnet_az2.id,\r\n aws_subnet.subnet_az3.id,\r\n ]\r\n security_groups = [aws_security_group.sg.id]\r\n }\r\n\r\n encryption_info {\r\n encryption_in_transit { \r\n client_broker = \"TLS\"\r\n in_cluster = true \r\n }\r\n }\r\n\r\n open_monitoring {\r\n prometheus {\r\n jmx_exporter {\r\n enabled_in_broker = true\r\n }\r\n node_exporter {\r\n enabled_in_broker = true\r\n }\r\n }\r\n }\r\n\r\n logging_info {\r\n broker_logs {\r\n cloudwatch_logs {\r\n enabled = true\r\n log_group = aws_cloudwatch_log_group.test.name\r\n }\r\n firehose {\r\n enabled = true\r\n delivery_stream = aws_kinesis_firehose_delivery_stream.test_stream.name\r\n }\r\n s3 {\r\n enabled = true\r\n bucket = aws_s3_bucket.bucket.id\r\n prefix = \"logs/msk-\"\r\n }\r\n }\r\n }\r\n\r\n tags = {\r\n foo = \"bar\"\r\n }\r\n}\r\n\r\noutput \"zookeeper_connect_string\" {\r\n value = aws_msk_cluster.example.zookeeper_connect_string\r\n}\r\n\r\noutput \"bootstrap_brokers_tls\" {\r\n description = \"TLS connection host:port pairs\"\r\n value = aws_msk_cluster.example.bootstrap_brokers_tls\r\n}\r\n```\r\n\r\n</details>\r\n\r\n**Expected behavior**\r\nNo error is thrown as long as an encryption_info block is defined\r\n\r\n\r\n Checkov Version 1.0.669\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass MSKClusterEncryption(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure MSK Cluster encryption in rest and transit is enabled\"\n id = \"CKV_AWS_81\"\n supported_resources = ['aws_msk_cluster']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'encryption_info' in conf.keys():\n encryption = conf['encryption_info'][0]\n if 'encryption_at_rest_kms_key_arn' in encryption:\n if 'encryption_in_transit' in encryption:\n transit = encryption['encryption_in_transit'][0]\n if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \\\n 'in_cluster' in transit and transit['in_cluster'][0] is False:\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = MSKClusterEncryption()\n", "path": "checkov/terraform/checks/resource/aws/MSKClusterEncryption.py"}]} | 1,949 | 414 |
gh_patches_debug_19960 | rasdani/github-patches | git_diff | microsoft__Qcodes-565 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
visa_handle.clear on instantiation causes some instruments to freeze
The use of visa_handle.clear on the instantiation of a VISA instrument causes some instruments to lock up. In particular I've noticed for SERIAL instruments, this command spews some garbage on the serial port that requires the instrument to be reset before it is possible to use it again.
### Steps to reproduce
1. Plug in a buggy instrument and instantiate an instance of it.
### Actual behaviour
The instrument freezes
### System
Win 7
Using the Harvard DecaDAC
2b6d72b
</issue>
<code>
[start of qcodes/instrument/visa.py]
1 """Visa instrument driver based on pyvisa."""
2 import visa
3
4 from .base import Instrument
5 import qcodes.utils.validators as vals
6
7
8 class VisaInstrument(Instrument):
9
10 """
11 Base class for all instruments using visa connections.
12
13 Args:
14 name (str): What this instrument is called locally.
15
16 address (str): The visa resource name to use to connect.
17 Optionally includes '@<backend>' at the end. For example,
18 'ASRL2' will open COM2 with the default NI backend, but
19 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes
20 does not install (or even require) ANY backends, it is up to
21 the user to do that. see eg:
22 http://pyvisa.readthedocs.org/en/stable/names.html
23
24 timeout (number): seconds to allow for responses. Default 5.
25
26 terminator: Read termination character(s) to look for. Default ''.
27
28 metadata (Optional[Dict]): additional static metadata to add to this
29 instrument's JSON snapshot.
30
31 See help for ``qcodes.Instrument`` for additional information on writing
32 instrument subclasses.
33
34 Attributes:
35 visa_handle (pyvisa.resources.Resource): The communication channel.
36 """
37
38 def __init__(self, name, address=None, timeout=5, terminator='', **kwargs):
39 super().__init__(name, **kwargs)
40
41 self.add_parameter('timeout',
42 get_cmd=self._get_visa_timeout,
43 set_cmd=self._set_visa_timeout,
44 unit='s',
45 vals=vals.MultiType(vals.Numbers(min_value=0),
46 vals.Enum(None)))
47
48 self.set_address(address)
49 self.set_terminator(terminator)
50 self.timeout.set(timeout)
51
52 def set_address(self, address):
53 """
54 Change the address for this instrument.
55
56 Args:
57 address: The visa resource name to use to connect.
58 Optionally includes '@<backend>' at the end. For example,
59 'ASRL2' will open COM2 with the default NI backend, but
60 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes
61 does not install (or even require) ANY backends, it is up to
62 the user to do that.
63 see eg: http://pyvisa.readthedocs.org/en/stable/names.html
64 """
65 # in case we're changing the address - close the old handle first
66 if getattr(self, 'visa_handle', None):
67 self.visa_handle.close()
68
69 if address and '@' in address:
70 address, visa_library = address.split('@')
71 resource_manager = visa.ResourceManager('@' + visa_library)
72 else:
73 resource_manager = visa.ResourceManager()
74
75 self.visa_handle = resource_manager.open_resource(address)
76
77 self.visa_handle.clear()
78 self._address = address
79
80 def set_terminator(self, terminator):
81 r"""
82 Change the read terminator to use.
83
84 Args:
85 terminator (str): Character(s) to look for at the end of a read.
86 eg. '\r\n'.
87 """
88 self.visa_handle.read_termination = terminator
89 self._terminator = terminator
90
91 def _set_visa_timeout(self, timeout):
92 if timeout is None:
93 self.visa_handle.timeout = None
94 else:
95 # pyvisa uses milliseconds but we use seconds
96 self.visa_handle.timeout = timeout * 1000.0
97
98 def _get_visa_timeout(self):
99 timeout_ms = self.visa_handle.timeout
100 if timeout_ms is None:
101 return None
102 else:
103 # pyvisa uses milliseconds but we use seconds
104 return timeout_ms / 1000
105
106 def close(self):
107 """Disconnect and irreversibly tear down the instrument."""
108 if getattr(self, 'visa_handle', None):
109 self.visa_handle.close()
110 super().close()
111
112 def check_error(self, ret_code):
113 """
114 Default error checking, raises an error if return code !=0.
115
116 Does not differentiate between warnings or specific error messages.
117 Override this function in your driver if you want to add specific
118 error messages.
119
120 Args:
121 ret_code (int): A Visa error code. See eg:
122 https://github.com/hgrecco/pyvisa/blob/master/pyvisa/errors.py
123
124 Raises:
125 visa.VisaIOError: if ``ret_code`` indicates a communication
126 problem.
127 """
128 if ret_code != 0:
129 raise visa.VisaIOError(ret_code)
130
131 def write_raw(self, cmd):
132 """
133 Low-level interface to ``visa_handle.write``.
134
135 Args:
136 cmd (str): The command to send to the instrument.
137 """
138 nr_bytes_written, ret_code = self.visa_handle.write(cmd)
139 self.check_error(ret_code)
140
141 def ask_raw(self, cmd):
142 """
143 Low-level interface to ``visa_handle.ask``.
144
145 Args:
146 cmd (str): The command to send to the instrument.
147
148 Returns:
149 str: The instrument's response.
150 """
151 return self.visa_handle.ask(cmd)
152
153 def snapshot_base(self, update=False):
154 """
155 State of the instrument as a JSON-compatible dict.
156
157 Args:
158 update (bool): If True, update the state by querying the
159 instrument. If False, just use the latest values in memory.
160
161 Returns:
162 dict: base snapshot
163 """
164 snap = super().snapshot_base(update=update)
165
166 snap['address'] = self._address
167 snap['terminator'] = self._terminator
168 snap['timeout'] = self.timeout.get()
169
170 return snap
171
[end of qcodes/instrument/visa.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qcodes/instrument/visa.py b/qcodes/instrument/visa.py
--- a/qcodes/instrument/visa.py
+++ b/qcodes/instrument/visa.py
@@ -1,10 +1,11 @@
"""Visa instrument driver based on pyvisa."""
import visa
+import pyvisa.constants as vi_const
+import pyvisa.resources
from .base import Instrument
import qcodes.utils.validators as vals
-
class VisaInstrument(Instrument):
"""
@@ -74,7 +75,13 @@
self.visa_handle = resource_manager.open_resource(address)
- self.visa_handle.clear()
+ # Serial instruments have a separate flush method to clear their buffers
+ # which behaves differently to clear. This is particularly important
+ # for instruments which do not support SCPI commands.
+ if isinstance(self.visa_handle, pyvisa.resources.SerialInstrument):
+ self.visa_handle.flush(vi_const.VI_READ_BUF_DISCARD | vi_const.VI_WRITE_BUF_DISCARD)
+ else:
+ self.visa_handle.clear()
self._address = address
def set_terminator(self, terminator):
| {"golden_diff": "diff --git a/qcodes/instrument/visa.py b/qcodes/instrument/visa.py\n--- a/qcodes/instrument/visa.py\n+++ b/qcodes/instrument/visa.py\n@@ -1,10 +1,11 @@\n \"\"\"Visa instrument driver based on pyvisa.\"\"\"\n import visa\n+import pyvisa.constants as vi_const\n+import pyvisa.resources\n \n from .base import Instrument\n import qcodes.utils.validators as vals\n \n-\n class VisaInstrument(Instrument):\n \n \"\"\"\n@@ -74,7 +75,13 @@\n \n self.visa_handle = resource_manager.open_resource(address)\n \n- self.visa_handle.clear()\n+ # Serial instruments have a separate flush method to clear their buffers\n+ # which behaves differently to clear. This is particularly important\n+ # for instruments which do not support SCPI commands.\n+ if isinstance(self.visa_handle, pyvisa.resources.SerialInstrument):\n+ self.visa_handle.flush(vi_const.VI_READ_BUF_DISCARD | vi_const.VI_WRITE_BUF_DISCARD)\n+ else:\n+ self.visa_handle.clear()\n self._address = address\n \n def set_terminator(self, terminator):\n", "issue": "visa_handle.clear on instantiation causes some instruments to freeze\nThe use of visa_handle.clear on the instantiation of a VISA instrument causes some instruments to lock up. In particular I've noticed for SERIAL instruments, this command spews some garbage on the serial port that requires the instrument to be reset before it is possible to use it again.\r\n\r\n### Steps to reproduce\r\n1. Plug in a buggy instrument and instantiate an instance of it.\r\n\r\n### Actual behaviour\r\nThe instrument freezes\r\n\r\n### System\r\nWin 7\r\nUsing the Harvard DecaDAC\r\n2b6d72b\n", "before_files": [{"content": "\"\"\"Visa instrument driver based on pyvisa.\"\"\"\nimport visa\n\nfrom .base import Instrument\nimport qcodes.utils.validators as vals\n\n\nclass VisaInstrument(Instrument):\n\n \"\"\"\n Base class for all instruments using visa connections.\n\n Args:\n name (str): What this instrument is called locally.\n\n address (str): The visa resource name to use to connect.\n Optionally includes '@<backend>' at the end. For example,\n 'ASRL2' will open COM2 with the default NI backend, but\n 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes\n does not install (or even require) ANY backends, it is up to\n the user to do that. see eg:\n http://pyvisa.readthedocs.org/en/stable/names.html\n\n timeout (number): seconds to allow for responses. Default 5.\n\n terminator: Read termination character(s) to look for. Default ''.\n\n metadata (Optional[Dict]): additional static metadata to add to this\n instrument's JSON snapshot.\n\n See help for ``qcodes.Instrument`` for additional information on writing\n instrument subclasses.\n\n Attributes:\n visa_handle (pyvisa.resources.Resource): The communication channel.\n \"\"\"\n\n def __init__(self, name, address=None, timeout=5, terminator='', **kwargs):\n super().__init__(name, **kwargs)\n\n self.add_parameter('timeout',\n get_cmd=self._get_visa_timeout,\n set_cmd=self._set_visa_timeout,\n unit='s',\n vals=vals.MultiType(vals.Numbers(min_value=0),\n vals.Enum(None)))\n\n self.set_address(address)\n self.set_terminator(terminator)\n self.timeout.set(timeout)\n\n def set_address(self, address):\n \"\"\"\n Change the address for this instrument.\n\n Args:\n address: The visa resource name to use to connect.\n Optionally includes '@<backend>' at the end. For example,\n 'ASRL2' will open COM2 with the default NI backend, but\n 'ASRL2@py' will open COM2 using pyvisa-py. 
Note that qcodes\n does not install (or even require) ANY backends, it is up to\n the user to do that.\n see eg: http://pyvisa.readthedocs.org/en/stable/names.html\n \"\"\"\n # in case we're changing the address - close the old handle first\n if getattr(self, 'visa_handle', None):\n self.visa_handle.close()\n\n if address and '@' in address:\n address, visa_library = address.split('@')\n resource_manager = visa.ResourceManager('@' + visa_library)\n else:\n resource_manager = visa.ResourceManager()\n\n self.visa_handle = resource_manager.open_resource(address)\n\n self.visa_handle.clear()\n self._address = address\n\n def set_terminator(self, terminator):\n r\"\"\"\n Change the read terminator to use.\n\n Args:\n terminator (str): Character(s) to look for at the end of a read.\n eg. '\\r\\n'.\n \"\"\"\n self.visa_handle.read_termination = terminator\n self._terminator = terminator\n\n def _set_visa_timeout(self, timeout):\n if timeout is None:\n self.visa_handle.timeout = None\n else:\n # pyvisa uses milliseconds but we use seconds\n self.visa_handle.timeout = timeout * 1000.0\n\n def _get_visa_timeout(self):\n timeout_ms = self.visa_handle.timeout\n if timeout_ms is None:\n return None\n else:\n # pyvisa uses milliseconds but we use seconds\n return timeout_ms / 1000\n\n def close(self):\n \"\"\"Disconnect and irreversibly tear down the instrument.\"\"\"\n if getattr(self, 'visa_handle', None):\n self.visa_handle.close()\n super().close()\n\n def check_error(self, ret_code):\n \"\"\"\n Default error checking, raises an error if return code !=0.\n\n Does not differentiate between warnings or specific error messages.\n Override this function in your driver if you want to add specific\n error messages.\n\n Args:\n ret_code (int): A Visa error code. See eg:\n https://github.com/hgrecco/pyvisa/blob/master/pyvisa/errors.py\n\n Raises:\n visa.VisaIOError: if ``ret_code`` indicates a communication\n problem.\n \"\"\"\n if ret_code != 0:\n raise visa.VisaIOError(ret_code)\n\n def write_raw(self, cmd):\n \"\"\"\n Low-level interface to ``visa_handle.write``.\n\n Args:\n cmd (str): The command to send to the instrument.\n \"\"\"\n nr_bytes_written, ret_code = self.visa_handle.write(cmd)\n self.check_error(ret_code)\n\n def ask_raw(self, cmd):\n \"\"\"\n Low-level interface to ``visa_handle.ask``.\n\n Args:\n cmd (str): The command to send to the instrument.\n\n Returns:\n str: The instrument's response.\n \"\"\"\n return self.visa_handle.ask(cmd)\n\n def snapshot_base(self, update=False):\n \"\"\"\n State of the instrument as a JSON-compatible dict.\n\n Args:\n update (bool): If True, update the state by querying the\n instrument. If False, just use the latest values in memory.\n\n Returns:\n dict: base snapshot\n \"\"\"\n snap = super().snapshot_base(update=update)\n\n snap['address'] = self._address\n snap['terminator'] = self._terminator\n snap['timeout'] = self.timeout.get()\n\n return snap\n", "path": "qcodes/instrument/visa.py"}]} | 2,287 | 251 |
gh_patches_debug_25982 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-1241 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Language Tree isn't handled correctly by cache in Page Tree
### Describe the Bug
So far, our Table rows aren't consistent after we delete a language tree node and recreate it. We should probably empty the cacheops cache after we delete a language tree node.
</issue>
<code>
[start of integreat_cms/cms/forms/language_tree/language_tree_node_form.py]
1 import logging
2
3 from django import forms
4 from django.utils.text import capfirst
5 from django.utils.translation import ugettext_lazy as _
6
7 from ..custom_model_form import CustomModelForm
8 from ..custom_tree_node_form import CustomTreeNodeForm
9 from ...models import Language, LanguageTreeNode
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class LanguageTreeNodeForm(CustomModelForm, CustomTreeNodeForm):
16 """
17 Form for creating and modifying language tree node objects
18 """
19
20 parent = forms.ModelChoiceField(
21 queryset=LanguageTreeNode.objects.all(),
22 required=False,
23 label=capfirst(LanguageTreeNode._meta.get_field("parent").verbose_name),
24 )
25
26 class Meta:
27 """
28 This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`
29 for more information.
30 """
31
32 #: The model of this :class:`django.forms.ModelForm`
33 model = LanguageTreeNode
34 #: The fields of the model which should be handled by this form
35 fields = ["language", "visible", "active"]
36
37 def __init__(self, **kwargs):
38 r"""
39 Initialize language tree node form
40
41 :param \**kwargs: The supplied keyword arguments
42 :type \**kwargs: dict
43 """
44
45 if "data" in kwargs:
46 # Copy QueryDict because it is immutable
47 data = kwargs.pop("data").copy()
48 # Use the parent node as value for the ref node
49 data["_ref_node_id"] = data["parent"]
50 data["_position"] = "first-child"
51 # Set the kwargs to updated POST data again
52 kwargs["data"] = data
53
54 # Instantiate CustomModelForm
55 super().__init__(**kwargs)
56
57 parent_queryset = self.instance.region.language_tree_nodes
58
59 if self.instance.id:
60 descendant_ids = [
61 descendant.id
62 for descendant in self.instance.get_cached_descendants(
63 include_self=True
64 )
65 ]
66 parent_queryset = parent_queryset.exclude(id__in=descendant_ids)
67 self.fields["parent"].initial = self.instance.parent_id
68 excluded_languages = [
69 language.id
70 for language in self.instance.region.languages
71 if language != self.instance.language
72 ]
73 else:
74 excluded_languages = [
75 language.id for language in self.instance.region.languages
76 ]
77
78 # limit possible parents to nodes of current region
79 self.fields["parent"].queryset = parent_queryset
80 self.fields["_ref_node_id"].choices = self.fields["parent"].choices
81 # limit possible languages to those which are not yet included in the tree
82 self.fields["language"].queryset = Language.objects.exclude(
83 id__in=excluded_languages
84 )
85
86 def clean(self):
87 """
88 Validate form fields which depend on each other, see :meth:`django.forms.Form.clean`:
89 Don't allow multiple root nodes for one region:
90 If self is a root node and the region already has a default language, raise a
91 :class:`~django.core.exceptions.ValidationError`.
92
93 :return: The cleaned form data
94 :rtype: dict
95 """
96 cleaned_data = super().clean()
97 default_language = self.instance.region.default_language
98 # There are two cases in which this error is thrown.
99 # Both cases include that the parent field is None.
100 # 1. The instance does exist:
101 # - The default language is different from the instance language
102 # 2. The instance does not exist:
103 # - The default language exists
104 if not cleaned_data.get("parent") and (
105 (self.instance.id and default_language != self.instance.language)
106 or (not self.instance.id and default_language)
107 ):
108 self.add_error(
109 "parent",
110 forms.ValidationError(
111 _(
112 "This region has already a default language."
113 "Please specify a source language for this language."
114 ),
115 code="invalid",
116 ),
117 )
118 logger.debug(
119 "LanguageTreeNodeForm validated [2] with cleaned data %r", cleaned_data
120 )
121 return cleaned_data
122
[end of integreat_cms/cms/forms/language_tree/language_tree_node_form.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py
--- a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py
+++ b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py
@@ -4,6 +4,8 @@
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
+from cacheops import invalidate_obj
+
from ..custom_model_form import CustomModelForm
from ..custom_tree_node_form import CustomTreeNodeForm
from ...models import Language, LanguageTreeNode
@@ -119,3 +121,25 @@
"LanguageTreeNodeForm validated [2] with cleaned data %r", cleaned_data
)
return cleaned_data
+
+ def save(self, commit=True):
+ """
+ This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to flush
+ the cache after commiting.
+
+ :param commit: Whether or not the changes should be written to the database
+ :type commit: bool
+
+ :return: The saved page translation object
+ :rtype: ~integreat_cms.cms.models.pages.page_translation.PageTranslation
+ """
+ # Save CustomModelForm and flush Cache
+ result = super().save(commit=commit)
+
+ for page in self.instance.region.pages.all():
+ invalidate_obj(page)
+ for poi in self.instance.region.pois.all():
+ invalidate_obj(poi)
+ for event in self.instance.region.events.all():
+ invalidate_obj(event)
+ return result
| {"golden_diff": "diff --git a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py\n--- a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py\n+++ b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py\n@@ -4,6 +4,8 @@\n from django.utils.text import capfirst\n from django.utils.translation import ugettext_lazy as _\n \n+from cacheops import invalidate_obj\n+\n from ..custom_model_form import CustomModelForm\n from ..custom_tree_node_form import CustomTreeNodeForm\n from ...models import Language, LanguageTreeNode\n@@ -119,3 +121,25 @@\n \"LanguageTreeNodeForm validated [2] with cleaned data %r\", cleaned_data\n )\n return cleaned_data\n+\n+ def save(self, commit=True):\n+ \"\"\"\n+ This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to flush\n+ the cache after commiting.\n+\n+ :param commit: Whether or not the changes should be written to the database\n+ :type commit: bool\n+\n+ :return: The saved page translation object\n+ :rtype: ~integreat_cms.cms.models.pages.page_translation.PageTranslation\n+ \"\"\"\n+ # Save CustomModelForm and flush Cache\n+ result = super().save(commit=commit)\n+\n+ for page in self.instance.region.pages.all():\n+ invalidate_obj(page)\n+ for poi in self.instance.region.pois.all():\n+ invalidate_obj(poi)\n+ for event in self.instance.region.events.all():\n+ invalidate_obj(event)\n+ return result\n", "issue": "Language Tree isn't handled correctly by cache in Page Tree\n### Describe the Bug\r\nSo far, our Table rows aren't consistent after we delete a language tree node and recreate it. We should probably empty the cacheops cache after we delete a language tree node.\r\n\n", "before_files": [{"content": "import logging\n\nfrom django import forms\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..custom_model_form import CustomModelForm\nfrom ..custom_tree_node_form import CustomTreeNodeForm\nfrom ...models import Language, LanguageTreeNode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LanguageTreeNodeForm(CustomModelForm, CustomTreeNodeForm):\n \"\"\"\n Form for creating and modifying language tree node objects\n \"\"\"\n\n parent = forms.ModelChoiceField(\n queryset=LanguageTreeNode.objects.all(),\n required=False,\n label=capfirst(LanguageTreeNode._meta.get_field(\"parent\").verbose_name),\n )\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = LanguageTreeNode\n #: The fields of the model which should be handled by this form\n fields = [\"language\", \"visible\", \"active\"]\n\n def __init__(self, **kwargs):\n r\"\"\"\n Initialize language tree node form\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n \"\"\"\n\n if \"data\" in kwargs:\n # Copy QueryDict because it is immutable\n data = kwargs.pop(\"data\").copy()\n # Use the parent node as value for the ref node\n data[\"_ref_node_id\"] = data[\"parent\"]\n data[\"_position\"] = \"first-child\"\n # Set the kwargs to updated POST data again\n kwargs[\"data\"] = data\n\n # Instantiate CustomModelForm\n super().__init__(**kwargs)\n\n parent_queryset = self.instance.region.language_tree_nodes\n\n if self.instance.id:\n descendant_ids = [\n descendant.id\n for descendant in 
self.instance.get_cached_descendants(\n include_self=True\n )\n ]\n parent_queryset = parent_queryset.exclude(id__in=descendant_ids)\n self.fields[\"parent\"].initial = self.instance.parent_id\n excluded_languages = [\n language.id\n for language in self.instance.region.languages\n if language != self.instance.language\n ]\n else:\n excluded_languages = [\n language.id for language in self.instance.region.languages\n ]\n\n # limit possible parents to nodes of current region\n self.fields[\"parent\"].queryset = parent_queryset\n self.fields[\"_ref_node_id\"].choices = self.fields[\"parent\"].choices\n # limit possible languages to those which are not yet included in the tree\n self.fields[\"language\"].queryset = Language.objects.exclude(\n id__in=excluded_languages\n )\n\n def clean(self):\n \"\"\"\n Validate form fields which depend on each other, see :meth:`django.forms.Form.clean`:\n Don't allow multiple root nodes for one region:\n If self is a root node and the region already has a default language, raise a\n :class:`~django.core.exceptions.ValidationError`.\n\n :return: The cleaned form data\n :rtype: dict\n \"\"\"\n cleaned_data = super().clean()\n default_language = self.instance.region.default_language\n # There are two cases in which this error is thrown.\n # Both cases include that the parent field is None.\n # 1. The instance does exist:\n # - The default language is different from the instance language\n # 2. The instance does not exist:\n # - The default language exists\n if not cleaned_data.get(\"parent\") and (\n (self.instance.id and default_language != self.instance.language)\n or (not self.instance.id and default_language)\n ):\n self.add_error(\n \"parent\",\n forms.ValidationError(\n _(\n \"This region has already a default language.\"\n \"Please specify a source language for this language.\"\n ),\n code=\"invalid\",\n ),\n )\n logger.debug(\n \"LanguageTreeNodeForm validated [2] with cleaned data %r\", cleaned_data\n )\n return cleaned_data\n", "path": "integreat_cms/cms/forms/language_tree/language_tree_node_form.py"}]} | 1,719 | 365 |
gh_patches_debug_11755 | rasdani/github-patches | git_diff | kserve__kserve-705 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KFServing Transformer does not work with onnxruntime and trtis model server
/kind bug
**What steps did you take and what happened:**
Currently kfserving transformer does not work with onnxruntime and trtis model server due to following validation because their data plane do not support `instances` key in JSON.
https://github.com/kubeflow/kfserving/blob/master/python/kfserving/kfserving/handlers/http.py#L24
**What did you expect to happen:**
Before we get to V2 data plane with agreed protocol across model servers, we should relax this validation.
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version: 0.2.2
- Kubeflow version:
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
</issue>
<code>
[start of python/kfserving/kfserving/handlers/http.py]
1 import tornado.web
2 import json
3 from typing import Dict
4 from http import HTTPStatus
5 from kfserving.kfmodel import KFModel
6
7
8 class HTTPHandler(tornado.web.RequestHandler):
9 def initialize(self, models: Dict[str, KFModel]):
10 self.models = models # pylint:disable=attribute-defined-outside-init
11
12 def get_model(self, name: str):
13 if name not in self.models:
14 raise tornado.web.HTTPError(
15 status_code=HTTPStatus.NOT_FOUND,
16 reason="Model with name %s does not exist." % name
17 )
18 model = self.models[name]
19 if not model.ready:
20 model.load()
21 return model
22
23 def validate(self, request):
24 if "instances" not in request:
25 raise tornado.web.HTTPError(
26 status_code=HTTPStatus.BAD_REQUEST,
27 reason="Expected key \"instances\" in request body"
28 )
29
30 if not isinstance(request["instances"], list):
31 raise tornado.web.HTTPError(
32 status_code=HTTPStatus.BAD_REQUEST,
33 reason="Expected \"instances\" to be a list"
34 )
35 return request
36
37
38 class PredictHandler(HTTPHandler):
39 def post(self, name: str):
40 model = self.get_model(name)
41 try:
42 body = json.loads(self.request.body)
43 except json.decoder.JSONDecodeError as e:
44 raise tornado.web.HTTPError(
45 status_code=HTTPStatus.BAD_REQUEST,
46 reason="Unrecognized request format: %s" % e
47 )
48 request = model.preprocess(body)
49 request = self.validate(request)
50 response = model.predict(request)
51 response = model.postprocess(response)
52 self.write(response)
53
54
55 class ExplainHandler(HTTPHandler):
56 def post(self, name: str):
57 model = self.get_model(name)
58 try:
59 body = json.loads(self.request.body)
60 except json.decoder.JSONDecodeError as e:
61 raise tornado.web.HTTPError(
62 status_code=HTTPStatus.BAD_REQUEST,
63 reason="Unrecognized request format: %s" % e
64 )
65 request = model.preprocess(body)
66 request = self.validate(request)
67 response = model.explain(request)
68 response = model.postprocess(response)
69 self.write(response)
70
[end of python/kfserving/kfserving/handlers/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kfserving/kfserving/handlers/http.py b/python/kfserving/kfserving/handlers/http.py
--- a/python/kfserving/kfserving/handlers/http.py
+++ b/python/kfserving/kfserving/handlers/http.py
@@ -21,13 +21,7 @@
return model
def validate(self, request):
- if "instances" not in request:
- raise tornado.web.HTTPError(
- status_code=HTTPStatus.BAD_REQUEST,
- reason="Expected key \"instances\" in request body"
- )
-
- if not isinstance(request["instances"], list):
+ if "instances" in request and not isinstance(request["instances"], list):
raise tornado.web.HTTPError(
status_code=HTTPStatus.BAD_REQUEST,
reason="Expected \"instances\" to be a list"
| {"golden_diff": "diff --git a/python/kfserving/kfserving/handlers/http.py b/python/kfserving/kfserving/handlers/http.py\n--- a/python/kfserving/kfserving/handlers/http.py\n+++ b/python/kfserving/kfserving/handlers/http.py\n@@ -21,13 +21,7 @@\n return model\n \n def validate(self, request):\n- if \"instances\" not in request:\n- raise tornado.web.HTTPError(\n- status_code=HTTPStatus.BAD_REQUEST,\n- reason=\"Expected key \\\"instances\\\" in request body\"\n- )\n-\n- if not isinstance(request[\"instances\"], list):\n+ if \"instances\" in request and not isinstance(request[\"instances\"], list):\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected \\\"instances\\\" to be a list\"\n", "issue": "KFServing Transformer does not work with onnxruntime and trtis model server\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nCurrently kfserving transformer does not work with onnxruntime and trtis model server due to following validation because their data plane do not support `instances` key in JSON.\r\nhttps://github.com/kubeflow/kfserving/blob/master/python/kfserving/kfserving/handlers/http.py#L24 \r\n\r\n\r\n\r\n**What did you expect to happen:**\r\nBefore we get to V2 data plane with agreed protocol across model servers, we should relax this validation.\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version: 0.2.2\r\n- Kubeflow version:\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "import tornado.web\nimport json\nfrom typing import Dict\nfrom http import HTTPStatus\nfrom kfserving.kfmodel import KFModel\n\n\nclass HTTPHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get_model(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.NOT_FOUND,\n reason=\"Model with name %s does not exist.\" % name\n )\n model = self.models[name]\n if not model.ready:\n model.load()\n return model\n\n def validate(self, request):\n if \"instances\" not in request:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected key \\\"instances\\\" in request body\"\n )\n\n if not isinstance(request[\"instances\"], list):\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected \\\"instances\\\" to be a list\"\n )\n return request\n\n\nclass PredictHandler(HTTPHandler):\n def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n response = model.predict(request)\n response = model.postprocess(response)\n self.write(response)\n\n\nclass ExplainHandler(HTTPHandler):\n def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n 
response = model.explain(request)\n response = model.postprocess(response)\n self.write(response)\n", "path": "python/kfserving/kfserving/handlers/http.py"}]} | 1,362 | 185 |
gh_patches_debug_7329 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8842 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-3213] [Bug] nitpick: incorrect docstring
should be `BaseAdapter.capabilities()` not `BaseAdapter.has_feature()`
https://github.com/dbt-labs/dbt-core/blob/1baebb423c82a9c645e59b390fc3a69089623600/core/dbt/adapters/capability.py#L7
</issue>
<code>
[start of core/dbt/adapters/capability.py]
1 from dataclasses import dataclass
2 from enum import Enum
3 from typing import Optional, DefaultDict, Mapping
4
5
6 class Capability(str, Enum):
7 """Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()"""
8
9 SchemaMetadataByRelations = "SchemaMetadataByRelations"
10 """Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving
11 all the relations in a schema."""
12
13 TableLastModifiedMetadata = "TableLastModifiedMetadata"
14 """Indicates support for determining the time of the last table modification by querying database metadata."""
15
16
17 class Support(str, Enum):
18 Unknown = "Unknown"
19 """The adapter has not declared whether this capability is a feature of the underlying DBMS."""
20
21 Unsupported = "Unsupported"
22 """This capability is not possible with the underlying DBMS, so the adapter does not implement related macros."""
23
24 NotImplemented = "NotImplemented"
25 """This capability is available in the underlying DBMS, but support has not yet been implemented in the adapter."""
26
27 Versioned = "Versioned"
28 """Some versions of the DBMS supported by the adapter support this capability and the adapter has implemented any
29 macros needed to use it."""
30
31 Full = "Full"
32 """All versions of the DBMS supported by the adapter support this capability and the adapter has implemented any
33 macros needed to use it."""
34
35
36 @dataclass
37 class CapabilitySupport:
38 support: Support
39 first_version: Optional[str] = None
40
41 def __bool__(self):
42 return self.support == Support.Versioned or self.support == Support.Full
43
44
45 class CapabilityDict(DefaultDict[Capability, CapabilitySupport]):
46 def __init__(self, vals: Mapping[Capability, CapabilitySupport]):
47 super().__init__(self._default)
48 self.update(vals)
49
50 @staticmethod
51 def _default():
52 return CapabilitySupport(support=Support.Unknown)
53
[end of core/dbt/adapters/capability.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/dbt/adapters/capability.py b/core/dbt/adapters/capability.py
--- a/core/dbt/adapters/capability.py
+++ b/core/dbt/adapters/capability.py
@@ -4,7 +4,7 @@
class Capability(str, Enum):
- """Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()"""
+ """Enumeration of optional adapter features which can be probed using BaseAdapter.capabilities()"""
SchemaMetadataByRelations = "SchemaMetadataByRelations"
"""Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving
| {"golden_diff": "diff --git a/core/dbt/adapters/capability.py b/core/dbt/adapters/capability.py\n--- a/core/dbt/adapters/capability.py\n+++ b/core/dbt/adapters/capability.py\n@@ -4,7 +4,7 @@\n \n \n class Capability(str, Enum):\n- \"\"\"Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()\"\"\"\n+ \"\"\"Enumeration of optional adapter features which can be probed using BaseAdapter.capabilities()\"\"\"\n \n SchemaMetadataByRelations = \"SchemaMetadataByRelations\"\n \"\"\"Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving\n", "issue": "[CT-3213] [Bug] nitpick: incorrect docstring\nshould be `BaseAdapter.capabilities()` not `BaseAdapter.has_feature()`\r\n\r\nhttps://github.com/dbt-labs/dbt-core/blob/1baebb423c82a9c645e59b390fc3a69089623600/core/dbt/adapters/capability.py#L7\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Optional, DefaultDict, Mapping\n\n\nclass Capability(str, Enum):\n \"\"\"Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()\"\"\"\n\n SchemaMetadataByRelations = \"SchemaMetadataByRelations\"\n \"\"\"Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving\n all the relations in a schema.\"\"\"\n\n TableLastModifiedMetadata = \"TableLastModifiedMetadata\"\n \"\"\"Indicates support for determining the time of the last table modification by querying database metadata.\"\"\"\n\n\nclass Support(str, Enum):\n Unknown = \"Unknown\"\n \"\"\"The adapter has not declared whether this capability is a feature of the underlying DBMS.\"\"\"\n\n Unsupported = \"Unsupported\"\n \"\"\"This capability is not possible with the underlying DBMS, so the adapter does not implement related macros.\"\"\"\n\n NotImplemented = \"NotImplemented\"\n \"\"\"This capability is available in the underlying DBMS, but support has not yet been implemented in the adapter.\"\"\"\n\n Versioned = \"Versioned\"\n \"\"\"Some versions of the DBMS supported by the adapter support this capability and the adapter has implemented any\n macros needed to use it.\"\"\"\n\n Full = \"Full\"\n \"\"\"All versions of the DBMS supported by the adapter support this capability and the adapter has implemented any\n macros needed to use it.\"\"\"\n\n\n@dataclass\nclass CapabilitySupport:\n support: Support\n first_version: Optional[str] = None\n\n def __bool__(self):\n return self.support == Support.Versioned or self.support == Support.Full\n\n\nclass CapabilityDict(DefaultDict[Capability, CapabilitySupport]):\n def __init__(self, vals: Mapping[Capability, CapabilitySupport]):\n super().__init__(self._default)\n self.update(vals)\n\n @staticmethod\n def _default():\n return CapabilitySupport(support=Support.Unknown)\n", "path": "core/dbt/adapters/capability.py"}]} | 1,137 | 139 |
gh_patches_debug_47832 | rasdani/github-patches | git_diff | getredash__redash-5354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Minor Salesforce runner fix
<!--
We use GitHub only for bug reports 🐛
Anything else should be posted to https://discuss.redash.io 👫
🚨For support, help & questions use https://discuss.redash.io/c/support
💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests
**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.
-->
### Issue Summary
A Security Token isn't required in all SFDC environments - depending on configuration. See [here](https://help.salesforce.com/articleView?id=000331668&type=1&mode=1) for more information.
### Steps to Reproduce
1. Add Salesforce as a data source where a token isn't required (and cannot be generated)
2. Cannot proceed without required field
### Technical details:
https://github.com/getredash/redash/blob/be56035bd6d9856361edc6b23d30a38c8f2d2be2/redash/query_runner/salesforce.py#L81
Just remove `token` from the `required` list. Seemed like it'd be faster to create an issue than submit a PR for such a small change
</issue>
<code>
[start of redash/query_runner/salesforce.py]
1 import logging
2 import re
3 from collections import OrderedDict
4
5 from redash.query_runner import (
6 TYPE_BOOLEAN,
7 TYPE_DATE,
8 TYPE_DATETIME,
9 TYPE_FLOAT,
10 TYPE_INTEGER,
11 TYPE_STRING,
12 BaseQueryRunner,
13 register,
14 )
15 from redash.utils import json_dumps
16
17 logger = logging.getLogger(__name__)
18
19 try:
20 from simple_salesforce import Salesforce as SimpleSalesforce
21 from simple_salesforce import SalesforceError
22 from simple_salesforce.api import DEFAULT_API_VERSION
23
24 enabled = True
25 except ImportError:
26 enabled = False
27
28 # See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
29 TYPES_MAP = dict(
30 id=TYPE_STRING,
31 string=TYPE_STRING,
32 currency=TYPE_FLOAT,
33 reference=TYPE_STRING,
34 double=TYPE_FLOAT,
35 picklist=TYPE_STRING,
36 date=TYPE_DATE,
37 url=TYPE_STRING,
38 phone=TYPE_STRING,
39 textarea=TYPE_STRING,
40 int=TYPE_INTEGER,
41 datetime=TYPE_DATETIME,
42 boolean=TYPE_BOOLEAN,
43 percent=TYPE_FLOAT,
44 multipicklist=TYPE_STRING,
45 masterrecord=TYPE_STRING,
46 location=TYPE_STRING,
47 JunctionIdList=TYPE_STRING,
48 encryptedstring=TYPE_STRING,
49 email=TYPE_STRING,
50 DataCategoryGroupReference=TYPE_STRING,
51 combobox=TYPE_STRING,
52 calculated=TYPE_STRING,
53 anyType=TYPE_STRING,
54 address=TYPE_STRING,
55 )
56
57 # Query Runner for Salesforce SOQL Queries
58 # For example queries, see:
59 # https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
60
61
62 class Salesforce(BaseQueryRunner):
63 should_annotate_query = False
64
65 @classmethod
66 def enabled(cls):
67 return enabled
68
69 @classmethod
70 def configuration_schema(cls):
71 return {
72 "type": "object",
73 "properties": {
74 "username": {"type": "string"},
75 "password": {"type": "string"},
76 "token": {"type": "string", "title": "Security Token"},
77 "sandbox": {"type": "boolean"},
78 "api_version": {
79 "type": "string",
80 "title": "Salesforce API Version",
81 "default": DEFAULT_API_VERSION,
82 },
83 },
84 "required": ["username", "password", "token"],
85 "secret": ["password", "token"],
86 }
87
88 def test_connection(self):
89 response = self._get_sf().describe()
90 if response is None:
91 raise Exception("Failed describing objects.")
92 pass
93
94 def _get_sf(self):
95 sf = SimpleSalesforce(
96 username=self.configuration["username"],
97 password=self.configuration["password"],
98 security_token=self.configuration["token"],
99 sandbox=self.configuration.get("sandbox", False),
100 version=self.configuration.get("api_version", DEFAULT_API_VERSION),
101 client_id="Redash",
102 )
103 return sf
104
105 def _clean_value(self, value):
106 if isinstance(value, OrderedDict) and "records" in value:
107 value = value["records"]
108 for row in value:
109 row.pop("attributes", None)
110 return value
111
112 def _get_value(self, dct, dots):
113 for key in dots.split("."):
114 if dct is not None and key in dct:
115 dct = dct.get(key)
116 else:
117 dct = None
118 return dct
119
120 def _get_column_name(self, key, parents=[]):
121 return ".".join(parents + [key])
122
123 def _build_columns(self, sf, child, parents=[]):
124 child_type = child["attributes"]["type"]
125 child_desc = sf.__getattr__(child_type).describe()
126 child_type_map = dict((f["name"], f["type"]) for f in child_desc["fields"])
127 columns = []
128 for key in child.keys():
129 if key != "attributes":
130 if isinstance(child[key], OrderedDict) and "attributes" in child[key]:
131 columns.extend(self._build_columns(sf, child[key], parents + [key]))
132 else:
133 column_name = self._get_column_name(key, parents)
134 key_type = child_type_map.get(key, "string")
135 column_type = TYPES_MAP.get(key_type, TYPE_STRING)
136 columns.append((column_name, column_type))
137 return columns
138
139 def _build_rows(self, columns, records):
140 rows = []
141 for record in records:
142 record.pop("attributes", None)
143 row = dict()
144 for column in columns:
145 key = column[0]
146 value = self._get_value(record, key)
147 row[key] = self._clean_value(value)
148 rows.append(row)
149 return rows
150
151 def run_query(self, query, user):
152 logger.debug("Salesforce is about to execute query: %s", query)
153 query = re.sub(r"/\*(.|\n)*?\*/", "", query).strip()
154 try:
155 columns = []
156 rows = []
157 sf = self._get_sf()
158 response = sf.query_all(query)
159 records = response["records"]
160 if response["totalSize"] > 0 and len(records) == 0:
161 columns = self.fetch_columns([("Count", TYPE_INTEGER)])
162 rows = [{"Count": response["totalSize"]}]
163 elif len(records) > 0:
164 cols = self._build_columns(sf, records[0])
165 rows = self._build_rows(cols, records)
166 columns = self.fetch_columns(cols)
167 error = None
168 data = {"columns": columns, "rows": rows}
169 json_data = json_dumps(data)
170 except SalesforceError as err:
171 error = err.content
172 json_data = None
173 return json_data, error
174
175 def get_schema(self, get_stats=False):
176 sf = self._get_sf()
177 response = sf.describe()
178 if response is None:
179 raise Exception("Failed describing objects.")
180
181 schema = {}
182 for sobject in response["sobjects"]:
183 table_name = sobject["name"]
184 if sobject["queryable"] is True and table_name not in schema:
185 desc = sf.__getattr__(sobject["name"]).describe()
186 fields = desc["fields"]
187 schema[table_name] = {
188 "name": table_name,
189 "columns": [f["name"] for f in fields],
190 }
191 return list(schema.values())
192
193
194 register(Salesforce)
195
[end of redash/query_runner/salesforce.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/query_runner/salesforce.py b/redash/query_runner/salesforce.py
--- a/redash/query_runner/salesforce.py
+++ b/redash/query_runner/salesforce.py
@@ -81,7 +81,7 @@
"default": DEFAULT_API_VERSION,
},
},
- "required": ["username", "password", "token"],
+ "required": ["username", "password"],
"secret": ["password", "token"],
}
| {"golden_diff": "diff --git a/redash/query_runner/salesforce.py b/redash/query_runner/salesforce.py\n--- a/redash/query_runner/salesforce.py\n+++ b/redash/query_runner/salesforce.py\n@@ -81,7 +81,7 @@\n \"default\": DEFAULT_API_VERSION,\n },\n },\n- \"required\": [\"username\", \"password\", \"token\"],\n+ \"required\": [\"username\", \"password\"],\n \"secret\": [\"password\", \"token\"],\n }\n", "issue": "Minor Salesforce runner fix\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\n\r\nA Security Token isn't required in all SFDC environments - depending on configuration. See [here](https://help.salesforce.com/articleView?id=000331668&type=1&mode=1) for more information.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Add Salesforce as a data source where a token isn't required (and cannot be generated)\r\n2. Cannot proceed without required field\r\n\r\n### Technical details:\r\n\r\nhttps://github.com/getredash/redash/blob/be56035bd6d9856361edc6b23d30a38c8f2d2be2/redash/query_runner/salesforce.py#L81\r\n\r\nJust remove `token` from the `required` list. Seemed like it'd be faster to create an issue than submit a PR for such a small change\n", "before_files": [{"content": "import logging\nimport re\nfrom collections import OrderedDict\n\nfrom redash.query_runner import (\n TYPE_BOOLEAN,\n TYPE_DATE,\n TYPE_DATETIME,\n TYPE_FLOAT,\n TYPE_INTEGER,\n TYPE_STRING,\n BaseQueryRunner,\n register,\n)\nfrom redash.utils import json_dumps\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from simple_salesforce import Salesforce as SimpleSalesforce\n from simple_salesforce import SalesforceError\n from simple_salesforce.api import DEFAULT_API_VERSION\n\n enabled = True\nexcept ImportError:\n enabled = False\n\n# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm\nTYPES_MAP = dict(\n id=TYPE_STRING,\n string=TYPE_STRING,\n currency=TYPE_FLOAT,\n reference=TYPE_STRING,\n double=TYPE_FLOAT,\n picklist=TYPE_STRING,\n date=TYPE_DATE,\n url=TYPE_STRING,\n phone=TYPE_STRING,\n textarea=TYPE_STRING,\n int=TYPE_INTEGER,\n datetime=TYPE_DATETIME,\n boolean=TYPE_BOOLEAN,\n percent=TYPE_FLOAT,\n multipicklist=TYPE_STRING,\n masterrecord=TYPE_STRING,\n location=TYPE_STRING,\n JunctionIdList=TYPE_STRING,\n encryptedstring=TYPE_STRING,\n email=TYPE_STRING,\n DataCategoryGroupReference=TYPE_STRING,\n combobox=TYPE_STRING,\n calculated=TYPE_STRING,\n anyType=TYPE_STRING,\n address=TYPE_STRING,\n)\n\n# Query Runner for Salesforce SOQL Queries\n# For example queries, see:\n# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm\n\n\nclass Salesforce(BaseQueryRunner):\n should_annotate_query = False\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n 
\"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"token\": {\"type\": \"string\", \"title\": \"Security Token\"},\n \"sandbox\": {\"type\": \"boolean\"},\n \"api_version\": {\n \"type\": \"string\",\n \"title\": \"Salesforce API Version\",\n \"default\": DEFAULT_API_VERSION,\n },\n },\n \"required\": [\"username\", \"password\", \"token\"],\n \"secret\": [\"password\", \"token\"],\n }\n\n def test_connection(self):\n response = self._get_sf().describe()\n if response is None:\n raise Exception(\"Failed describing objects.\")\n pass\n\n def _get_sf(self):\n sf = SimpleSalesforce(\n username=self.configuration[\"username\"],\n password=self.configuration[\"password\"],\n security_token=self.configuration[\"token\"],\n sandbox=self.configuration.get(\"sandbox\", False),\n version=self.configuration.get(\"api_version\", DEFAULT_API_VERSION),\n client_id=\"Redash\",\n )\n return sf\n\n def _clean_value(self, value):\n if isinstance(value, OrderedDict) and \"records\" in value:\n value = value[\"records\"]\n for row in value:\n row.pop(\"attributes\", None)\n return value\n\n def _get_value(self, dct, dots):\n for key in dots.split(\".\"):\n if dct is not None and key in dct:\n dct = dct.get(key)\n else:\n dct = None\n return dct\n\n def _get_column_name(self, key, parents=[]):\n return \".\".join(parents + [key])\n\n def _build_columns(self, sf, child, parents=[]):\n child_type = child[\"attributes\"][\"type\"]\n child_desc = sf.__getattr__(child_type).describe()\n child_type_map = dict((f[\"name\"], f[\"type\"]) for f in child_desc[\"fields\"])\n columns = []\n for key in child.keys():\n if key != \"attributes\":\n if isinstance(child[key], OrderedDict) and \"attributes\" in child[key]:\n columns.extend(self._build_columns(sf, child[key], parents + [key]))\n else:\n column_name = self._get_column_name(key, parents)\n key_type = child_type_map.get(key, \"string\")\n column_type = TYPES_MAP.get(key_type, TYPE_STRING)\n columns.append((column_name, column_type))\n return columns\n\n def _build_rows(self, columns, records):\n rows = []\n for record in records:\n record.pop(\"attributes\", None)\n row = dict()\n for column in columns:\n key = column[0]\n value = self._get_value(record, key)\n row[key] = self._clean_value(value)\n rows.append(row)\n return rows\n\n def run_query(self, query, user):\n logger.debug(\"Salesforce is about to execute query: %s\", query)\n query = re.sub(r\"/\\*(.|\\n)*?\\*/\", \"\", query).strip()\n try:\n columns = []\n rows = []\n sf = self._get_sf()\n response = sf.query_all(query)\n records = response[\"records\"]\n if response[\"totalSize\"] > 0 and len(records) == 0:\n columns = self.fetch_columns([(\"Count\", TYPE_INTEGER)])\n rows = [{\"Count\": response[\"totalSize\"]}]\n elif len(records) > 0:\n cols = self._build_columns(sf, records[0])\n rows = self._build_rows(cols, records)\n columns = self.fetch_columns(cols)\n error = None\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n except SalesforceError as err:\n error = err.content\n json_data = None\n return json_data, error\n\n def get_schema(self, get_stats=False):\n sf = self._get_sf()\n response = sf.describe()\n if response is None:\n raise Exception(\"Failed describing objects.\")\n\n schema = {}\n for sobject in response[\"sobjects\"]:\n table_name = sobject[\"name\"]\n if sobject[\"queryable\"] is True and table_name not in schema:\n desc = sf.__getattr__(sobject[\"name\"]).describe()\n fields = desc[\"fields\"]\n 
schema[table_name] = {\n \"name\": table_name,\n \"columns\": [f[\"name\"] for f in fields],\n }\n return list(schema.values())\n\n\nregister(Salesforce)\n", "path": "redash/query_runner/salesforce.py"}]} | 2,714 | 105 |
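Editor's side note on the Salesforce record above (not part of the dataset): the recorded golden diff only drops `"token"` from the `required` list, while the captured `_get_sf()` still reads `self.configuration["token"]`, which assumes the key is always present. The sketch below shows one way a runner could tolerate a missing token; the `configuration.get("token")` fallback and the helper name `build_salesforce_client` are assumptions for illustration, not part of the recorded fix.

```python
# Editor's illustration; not part of the dataset record above.
# Assumption: once "token" is optional, the runner should pass None through
# rather than indexing self.configuration["token"] directly. The recorded
# golden diff only edits the "required" list and leaves _get_sf() untouched.
from simple_salesforce import Salesforce as SimpleSalesforce
from simple_salesforce.api import DEFAULT_API_VERSION


def build_salesforce_client(configuration: dict) -> SimpleSalesforce:
    """Build a client that tolerates an absent security token."""
    return SimpleSalesforce(
        username=configuration["username"],
        password=configuration["password"],
        # dict.get() yields None when no token was configured; whether the
        # underlying library accepts that depends on the simple-salesforce
        # version in use, so treat this as a sketch rather than a drop-in fix.
        security_token=configuration.get("token"),
        sandbox=configuration.get("sandbox", False),
        version=configuration.get("api_version", DEFAULT_API_VERSION),
        client_id="Redash",
    )
```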