problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_7752
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-29188
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "View stream subscribers" page
We should add a "View stream subscribers" page (below "Create a stream" under Streams & topics). It should document the right sidebar UI in #26717 (currently test-deployed on CZO), as well as how to see subscribers via settings.
</issue>
<code>
[start of zerver/lib/markdown/tabbed_sections.py]
1 import re
2 from typing import Any, Dict, List, Mapping, Optional
3
4 import markdown
5 from markdown.extensions import Extension
6 from markdown.preprocessors import Preprocessor
7 from typing_extensions import override
8
9 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES
10
11 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$")
12 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$")
13 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$")
14
15 TABBED_SECTION_TEMPLATE = """
16 <div class="tabbed-section {tab_class}" markdown="1">
17 {nav_bar}
18 <div class="blocks">
19 {blocks}
20 </div>
21 </div>
22 """.strip()
23
24 NAV_BAR_TEMPLATE = """
25 <ul class="nav">
26 {tabs}
27 </ul>
28 """.strip()
29
30 NAV_LIST_ITEM_TEMPLATE = """
31 <li data-tab-key="{data_tab_key}" tabindex="0">{label}</li>
32 """.strip()
33
34 DIV_TAB_CONTENT_TEMPLATE = """
35 <div data-tab-key="{data_tab_key}" markdown="1">
36 {content}
37 </div>
38 """.strip()
39
40 # If adding new entries here, also check if you need to update
41 # tabbed-instructions.js
42 TAB_SECTION_LABELS = {
43 "desktop-web": "Desktop/Web",
44 "ios": "iOS",
45 "android": "Android",
46 "mac": "macOS",
47 "windows": "Windows",
48 "linux": "Linux",
49 "most-systems": "Most systems",
50 "linux-with-apt": "Linux with APT",
51 "python": "Python",
52 "js": "JavaScript",
53 "curl": "curl",
54 "zulip-send": "zulip-send",
55 "web": "Web",
56 "desktop": "Desktop",
57 "mobile": "Mobile",
58 "mm-default": "Default installation",
59 "mm-cloud": "Cloud instance",
60 "mm-docker": "Docker",
61 "mm-gitlab-omnibus": "GitLab Omnibus",
62 "mm-self-hosting-cloud-export": "Self hosting (cloud export)",
63 "require-invitations": "Require invitations",
64 "allow-anyone-to-join": "Allow anyone to join",
65 "restrict-by-email-domain": "Restrict by email domain",
66 "zoom": "Zoom",
67 "jitsi-meet": "Jitsi Meet",
68 "bigbluebutton": "BigBlueButton",
69 "disable": "Disabled",
70 "chrome": "Chrome",
71 "firefox": "Firefox",
72 "desktop-app": "Desktop app",
73 "system-proxy-settings": "System proxy settings",
74 "custom-proxy-settings": "Custom proxy settings",
75 "stream": "From a stream view",
76 "not-stream": "From other views",
77 "via-recent-conversations": "Via recent conversations",
78 "via-inbox-view": "Via inbox view",
79 "via-left-sidebar": "Via left sidebar",
80 "instructions-for-all-platforms": "Instructions for all platforms",
81 "public-streams": "Public streams",
82 "private-streams": "Private streams",
83 "web-public-streams": "Web-public streams",
84 "via-user-card": "Via user card",
85 "via-user-profile": "Via user profile",
86 "via-organization-settings": "Via organization settings",
87 "via-personal-settings": "Via personal settings",
88 "via-stream-settings": "Via stream settings",
89 "default-subdomain": "Default subdomain",
90 "custom-subdomain": "Custom subdomain",
91 "zulip-cloud-standard": "Zulip Cloud Standard",
92 "zulip-cloud-plus": "Zulip Cloud Plus",
93 "request-sponsorship": "Request sponsorship",
94 "request-education-pricing": "Request education pricing",
95 "zulip-cloud": "Zulip Cloud",
96 "self-hosting": "Self hosting",
97 "okta": "Okta",
98 "onelogin": "OneLogin",
99 "azuread": "AzureAD",
100 "keycloak": "Keycloak",
101 "auth0": "Auth0",
102 "logged-in": "If you are logged in",
103 "logged-out": "If you are logged out",
104 "user": "User",
105 "bot": "Bot",
106 "on-sign-up": "On sign-up",
107 "via-paste": "Via paste",
108 "via-drag-and-drop": "Via drag-and-drop",
109 "via-markdown": "Via Markdown",
110 "via-compose-box-buttons": "Via compose box buttons",
111 "stream-compose": "Compose to a stream",
112 "dm-compose": "Compose a DM",
113 "v8": "Zulip Server 8.0+",
114 "v6": "Zulip Server 6.0+",
115 "v4": "Zulip Server 4.0+",
116 "all-versions": "All versions",
117 "for-a-bot": "For a bot",
118 "for-yourself": "For yourself",
119 }
120
121
122 class TabbedSectionsGenerator(Extension):
123 @override
124 def extendMarkdown(self, md: markdown.Markdown) -> None:
125 md.preprocessors.register(
126 TabbedSectionsPreprocessor(md, self.getConfigs()),
127 "tabbed_sections",
128 PREPROCESSOR_PRIORITES["tabbed_sections"],
129 )
130
131
132 class TabbedSectionsPreprocessor(Preprocessor):
133 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
134 super().__init__(md)
135
136 @override
137 def run(self, lines: List[str]) -> List[str]:
138 tab_section = self.parse_tabs(lines)
139 while tab_section:
140 if "tabs" in tab_section:
141 tab_class = "has-tabs"
142 else:
143 tab_class = "no-tabs"
144 tab_section["tabs"] = [
145 {
146 "tab_key": "instructions-for-all-platforms",
147 "start": tab_section["start_tabs_index"],
148 }
149 ]
150 nav_bar = self.generate_nav_bar(tab_section)
151 content_blocks = self.generate_content_blocks(tab_section, lines)
152 rendered_tabs = TABBED_SECTION_TEMPLATE.format(
153 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks
154 )
155
156 start = tab_section["start_tabs_index"]
157 end = tab_section["end_tabs_index"] + 1
158 lines = [*lines[:start], rendered_tabs, *lines[end:]]
159 tab_section = self.parse_tabs(lines)
160 return lines
161
162 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:
163 tab_content_blocks = []
164 for index, tab in enumerate(tab_section["tabs"]):
165 start_index = tab["start"] + 1
166 try:
167 # If there are more tabs, we can use the starting index
168 # of the next tab as the ending index of the previous one
169 end_index = tab_section["tabs"][index + 1]["start"]
170 except IndexError:
171 # Otherwise, just use the end of the entire section
172 end_index = tab_section["end_tabs_index"]
173
174 content = "\n".join(lines[start_index:end_index]).strip()
175 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(
176 data_tab_key=tab["tab_key"],
177 # Wrapping the content in two newlines is necessary here.
178 # If we don't do this, the inner Markdown does not get
179 # rendered properly.
180 content=f"\n{content}\n",
181 )
182 tab_content_blocks.append(tab_content_block)
183 return "\n".join(tab_content_blocks)
184
185 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
186 li_elements = []
187 for tab in tab_section["tabs"]:
188 tab_key = tab.get("tab_key")
189 tab_label = TAB_SECTION_LABELS.get(tab_key)
190 if tab_label is None:
191 raise ValueError(
192 f"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py"
193 )
194
195 li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)
196 li_elements.append(li)
197
198 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements))
199
200 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
201 block: Dict[str, Any] = {}
202 for index, line in enumerate(lines):
203 start_match = START_TABBED_SECTION_REGEX.search(line)
204 if start_match:
205 block["start_tabs_index"] = index
206
207 tab_content_match = TAB_CONTENT_REGEX.search(line)
208 if tab_content_match:
209 block.setdefault("tabs", [])
210 tab = {"start": index, "tab_key": tab_content_match.group(1)}
211 block["tabs"].append(tab)
212
213 end_match = END_TABBED_SECTION_REGEX.search(line)
214 if end_match:
215 block["end_tabs_index"] = index
216 break
217 return block
218
219
220 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:
221 return TabbedSectionsGenerator(**kwargs)
222
[end of zerver/lib/markdown/tabbed_sections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -77,6 +77,7 @@
"via-recent-conversations": "Via recent conversations",
"via-inbox-view": "Via inbox view",
"via-left-sidebar": "Via left sidebar",
+ "via-right-sidebar": "Via right sidebar",
"instructions-for-all-platforms": "Instructions for all platforms",
"public-streams": "Public streams",
"private-streams": "Private streams",
|
{"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -77,6 +77,7 @@\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n+ \"via-right-sidebar\": \"Via right sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n", "issue": "Add \"View stream subscribers\" page\nWe should add a \"View stream subscribers\" page (below \"Create a stream\" under Streams & topics). It should document the right sidebar UI in #26717 (currently test-deployed on CZO), as well as how to see subscribers via settings.\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"most-systems\": \"Most systems\",\n \"linux-with-apt\": \"Linux with APT\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public 
streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": \"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud-standard\": \"Zulip Cloud Standard\",\n \"zulip-cloud-plus\": \"Zulip Cloud Plus\",\n \"request-sponsorship\": \"Request sponsorship\",\n \"request-education-pricing\": \"Request education pricing\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v8\": \"Zulip Server 8.0+\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n \"all-versions\": \"All versions\",\n \"for-a-bot\": \"For a bot\",\n \"for-yourself\": \"For yourself\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return 
\"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]}
| 3,109 | 142 |
gh_patches_debug_23065
|
rasdani/github-patches
|
git_diff
|
modoboa__modoboa-1658
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SPF parser failure
Parser can fail because of extra space characters.
```python
File "/srv/modoboa/instance/manage.py", line 25, in <module>
execute_from_command_line(sys.argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py", line 53, in run_from_argv
return super(SubcommandCommand, self).run_from_argv(argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py", line 86, in handle
return command.run_from_argv(argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py", line 275, in handle
self.check_domain(domain, **options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py", line 210, in check_domain
domain, "spf", ttl)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py", line 37, in get_or_create_for_domain
record.check_syntax(ttl)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py", line 74, in check_syntax
result = func(self.value)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/lib.py", line 173, in check_spf_syntax
if part[0] in ["+", "-", "~", "?"]:
IndexError: string index out of range
```
</issue>
<code>
[start of modoboa/dnstools/lib.py]
1 """dnstools library."""
2
3 from __future__ import unicode_literals
4
5 import ipaddress
6
7 from django.utils.translation import ugettext as _
8
9 from modoboa.admin import lib as admin_lib
10
11 from . import constants
12
13
14 def get_spf_record(domain):
15 """Return SPF record for domain (if any)."""
16 records = admin_lib.get_dns_records(domain, "TXT")
17 if records is None:
18 return None
19 for record in records:
20 value = str(record).strip('"')
21 if value.startswith("v=spf1"):
22 return value
23 return None
24
25
26 def get_dkim_record(domain, selector):
27 """Return DKIM records form domain (if any)."""
28 name = "{}._domainkey.{}".format(selector, domain)
29 records = admin_lib.get_dns_records(name, "TXT")
30 if records is None:
31 return None
32 for record in records:
33 value = str(record).strip('"')
34 if value.startswith("v=DKIM1"):
35 return value
36 return None
37
38
39 def get_dmarc_record(domain):
40 """Return DMARC record for domain (if any)."""
41 name = "_dmarc.{}".format(domain)
42 records = admin_lib.get_dns_records(name, "TXT")
43 if records is None:
44 return None
45 for record in records:
46 value = str(record).strip('"')
47 if value.startswith("v=DMARC1"):
48 return value
49 return None
50
51
52 def _get_simple_record(name):
53 """We just want to know if name is declared."""
54 for rdtype in ["A", "CNAME", "AAAA"]:
55 records = admin_lib.get_dns_records(name, rdtype)
56 if records is not None:
57 break
58 else:
59 return None
60 for record in records:
61 value = str(record).strip('"')
62 break
63 return value
64
65
66 def get_autoconfig_record(domain):
67 """Return autoconfig record for domain (if any)."""
68 return _get_simple_record("autoconfig.{}".format(domain))
69
70
71 def get_autodiscover_record(domain):
72 """Return autodiscover record for domain (if any)."""
73 return _get_simple_record("autodiscover.{}".format(domain))
74
75
76 class DNSSyntaxError(Exception):
77 """Custom exception for DNS errors."""
78
79 pass
80
81
82 def check_spf_ip4(value):
83 """Check syntax of ip4 mechanism."""
84 parts = value.split(":")
85 if len(parts) != 2:
86 raise DNSSyntaxError(_("Wrong ip4 mechanism syntax"))
87 try:
88 ipaddress.ip_network(parts[1], False)
89 except ValueError:
90 raise DNSSyntaxError(_("Wrong IPv4 address format"))
91
92
93 def check_spf_ip6(value):
94 """Check syntax of ip6 mechanism."""
95 if not value.startswith("ip6:"):
96 raise DNSSyntaxError(_("Wrong ip6 mechanism syntax"))
97 value = value.replace("ip6:", "")
98 try:
99 ipaddress.ip_network(value, False)
100 except ValueError:
101 raise DNSSyntaxError(_("Wrong IPv6 address format"))
102
103
104 def _check_domain_and_mask(value, mechanism):
105 """Check for valid domain / mask."""
106 domain = None
107 mask = None
108 if ":" in value:
109 mechanism, domain = value.split(":")
110 if "/" in domain:
111 domain, mask = domain.split("/")
112 elif "/" in value:
113 mechanism, mask = value.split("/")
114 else:
115 raise DNSSyntaxError(
116 _("Invalid syntax for {} mechanism").format(mechanism))
117 if mask and (not mask.isdigit() or int(mask) > 32):
118 raise DNSSyntaxError(_("Invalid mask found {}").format(mask))
119
120
121 def check_spf_a(value):
122 """Check syntax of a mechanism."""
123 if value == "a":
124 return
125 _check_domain_and_mask(value, "a")
126
127
128 def check_spf_mx(value):
129 """Check syntax of mx mechanism."""
130 if value == "mx":
131 return
132 _check_domain_and_mask(value, "mx")
133
134
135 def _check_simple(value, mechanism):
136 """Simple check."""
137 if value == mechanism:
138 return
139 parts = value.split(":")
140 if len(parts) != 2:
141 raise DNSSyntaxError(
142 _("Invalid syntax for {} mechanism").format(mechanism))
143
144
145 def check_spf_ptr(value):
146 """Check syntax of ptr mechanism."""
147 _check_simple(value, "ptr")
148
149
150 def check_spf_exists(value):
151 """Check syntax of ptr mechanism."""
152 _check_simple(value, "exists")
153
154
155 def check_spf_include(value):
156 """Check syntax of include mechanism."""
157 _check_simple(value, "include")
158
159
160 def check_spf_syntax(record):
161 """Check if record has a valid SPF syntax."""
162 if not record.startswith("v=spf1"):
163 raise DNSSyntaxError(_("Not an SPF record"))
164 parts = record.split(" ")[1:]
165 modifiers = []
166 mechanisms = []
167 for part in parts:
168 qualifier = None
169 if part[0] in ["+", "-", "~", "?"]:
170 qualifier = part[0]
171 part = part[1:]
172 if part == "all":
173 continue
174 for mechanism in constants.SPF_MECHANISMS:
175 if part.startswith(mechanism):
176 globals()["check_spf_{}".format(mechanism)](part)
177 mechanisms.append(mechanism)
178 break
179 else:
180 # Look for modifier
181 modifier = part.split("=")
182 if len(modifier) != 2:
183 raise DNSSyntaxError(_("Unknown mechanism {}").format(part))
184 if modifier[0] not in ["redirect", "exp"]:
185 raise DNSSyntaxError(_("Unknown modifier {}").format(
186 modifier[0]))
187 if modifier[0] in modifiers:
188 raise DNSSyntaxError(_("Duplicate modifier {} found").format(
189 modifier[0]))
190 modifiers.append(modifier[0])
191 if not len(mechanisms) and not len(modifiers):
192 raise DNSSyntaxError(_("No mechanism found"))
193 return None
194
195
196 def check_dkim_syntax(record):
197 """Check if record has a valid DKIM syntax."""
198 if not record.startswith("v=DKIM1"):
199 raise DNSSyntaxError(_("Not a valid DKIM record"))
200 key = None
201 for tag in record.split(";")[1:]:
202 tag = tag.strip(" ")
203 parts = tag.split("=", 1)
204 if len(parts) != 2:
205 raise DNSSyntaxError(_("Invalid tag {}").format(tag))
206 name = parts[0].strip(" ")
207 if name == "p":
208 key = "".join(part.strip('"') for part in parts[1].split(" "))
209 if key is None:
210 raise DNSSyntaxError(_("No key found in record"))
211 return key
212
213
214 def check_dmarc_tag_string_value(tag, value):
215 """Check if value is valid for tag."""
216 tdef = constants.DMARC_TAGS[tag]
217 error = _("Wrong value {} for tag {}").format(value, tag)
218 if "values" in tdef and value not in tdef["values"]:
219 raise DNSSyntaxError(error)
220 elif "regex" in tdef and tdef["regex"].match(value) is None:
221 raise DNSSyntaxError(error)
222
223
224 def check_dmarc_tag(tag, value):
225 """Check if tag is valid."""
226 tdef = constants.DMARC_TAGS[tag]
227 ttype = tdef.get("type", "string")
228 if ttype == "list":
229 for svalue in value.split(","):
230 check_dmarc_tag_string_value(tag, svalue)
231 elif ttype == "int":
232 error = _("Wrong value {} for tag {}:").format(value, tag)
233 try:
234 value = int(value)
235 except ValueError:
236 raise DNSSyntaxError(error + _(" not an integer"))
237 if "min_value" in tdef and value < tdef["min_value"]:
238 raise DNSSyntaxError(
239 error + _(" less than {}").format(tdef["min_value"]))
240 if "max_value" in tdef and value > tdef["max_value"]:
241 raise DNSSyntaxError(
242 error + _(" greater than {}").format(tdef["max_value"]))
243 else:
244 check_dmarc_tag_string_value(tag, value)
245
246
247 def check_dmarc_syntax(record):
248 """Check if record has a valid DMARC syntax."""
249 if not record.startswith("v=DMARC1"):
250 raise DNSSyntaxError(_("Not a valid DMARC record"))
251 tags = {}
252 for tag in record.split(";")[1:]:
253 tag = tag.strip(" ")
254 parts = tag.split("=")
255 if len(parts) != 2:
256 raise DNSSyntaxError(_("Invalid tag {}").format(tag))
257 name = parts[0].strip(" ")
258 if name not in constants.DMARC_TAGS:
259 raise DNSSyntaxError(_("Unknown tag {}").format(name))
260 value = parts[1].strip(" ")
261 check_dmarc_tag(name, value)
262 tags[name] = value
263 if "p" not in tags:
264 raise DNSSyntaxError(_("Missing required p tag"))
265
[end of modoboa/dnstools/lib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/modoboa/dnstools/lib.py b/modoboa/dnstools/lib.py
--- a/modoboa/dnstools/lib.py
+++ b/modoboa/dnstools/lib.py
@@ -165,6 +165,8 @@
modifiers = []
mechanisms = []
for part in parts:
+ if part == "":
+ continue
qualifier = None
if part[0] in ["+", "-", "~", "?"]:
qualifier = part[0]
@@ -200,6 +202,8 @@
key = None
for tag in record.split(";")[1:]:
tag = tag.strip(" ")
+ if tag == "":
+ continue
parts = tag.split("=", 1)
if len(parts) != 2:
raise DNSSyntaxError(_("Invalid tag {}").format(tag))
@@ -250,6 +254,8 @@
raise DNSSyntaxError(_("Not a valid DMARC record"))
tags = {}
for tag in record.split(";")[1:]:
+ if tag == "":
+ continue
tag = tag.strip(" ")
parts = tag.split("=")
if len(parts) != 2:
|
{"golden_diff": "diff --git a/modoboa/dnstools/lib.py b/modoboa/dnstools/lib.py\n--- a/modoboa/dnstools/lib.py\n+++ b/modoboa/dnstools/lib.py\n@@ -165,6 +165,8 @@\n modifiers = []\n mechanisms = []\n for part in parts:\n+ if part == \"\":\n+ continue\n qualifier = None\n if part[0] in [\"+\", \"-\", \"~\", \"?\"]:\n qualifier = part[0]\n@@ -200,6 +202,8 @@\n key = None\n for tag in record.split(\";\")[1:]:\n tag = tag.strip(\" \")\n+ if tag == \"\":\n+ continue\n parts = tag.split(\"=\", 1)\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n@@ -250,6 +254,8 @@\n raise DNSSyntaxError(_(\"Not a valid DMARC record\"))\n tags = {}\n for tag in record.split(\";\")[1:]:\n+ if tag == \"\":\n+ continue\n tag = tag.strip(\" \")\n parts = tag.split(\"=\")\n if len(parts) != 2:\n", "issue": "SPF parser failure\nParser can fail because of extra space characters.\r\n```python\r\n File \"/srv/modoboa/instance/manage.py\", line 25, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 364, in execute_from_command_line\r\n utility.execute()\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 356, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py\", line 53, in run_from_argv\r\n return super(SubcommandCommand, self).run_from_argv(argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py\", line 86, in handle\r\n return command.run_from_argv(argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py\", line 275, in handle\r\n self.check_domain(domain, **options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py\", line 210, in check_domain\r\n domain, \"spf\", ttl)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py\", line 37, in get_or_create_for_domain\r\n record.check_syntax(ttl)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py\", line 74, in check_syntax\r\n result = func(self.value)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/lib.py\", line 173, in check_spf_syntax\r\n if part[0] in [\"+\", \"-\", \"~\", \"?\"]:\r\nIndexError: string index out of range\r\n```\n", "before_files": [{"content": "\"\"\"dnstools library.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport ipaddress\n\nfrom django.utils.translation import ugettext as _\n\nfrom modoboa.admin import lib as admin_lib\n\nfrom . 
import constants\n\n\ndef get_spf_record(domain):\n \"\"\"Return SPF record for domain (if any).\"\"\"\n records = admin_lib.get_dns_records(domain, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=spf1\"):\n return value\n return None\n\n\ndef get_dkim_record(domain, selector):\n \"\"\"Return DKIM records form domain (if any).\"\"\"\n name = \"{}._domainkey.{}\".format(selector, domain)\n records = admin_lib.get_dns_records(name, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=DKIM1\"):\n return value\n return None\n\n\ndef get_dmarc_record(domain):\n \"\"\"Return DMARC record for domain (if any).\"\"\"\n name = \"_dmarc.{}\".format(domain)\n records = admin_lib.get_dns_records(name, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=DMARC1\"):\n return value\n return None\n\n\ndef _get_simple_record(name):\n \"\"\"We just want to know if name is declared.\"\"\"\n for rdtype in [\"A\", \"CNAME\", \"AAAA\"]:\n records = admin_lib.get_dns_records(name, rdtype)\n if records is not None:\n break\n else:\n return None\n for record in records:\n value = str(record).strip('\"')\n break\n return value\n\n\ndef get_autoconfig_record(domain):\n \"\"\"Return autoconfig record for domain (if any).\"\"\"\n return _get_simple_record(\"autoconfig.{}\".format(domain))\n\n\ndef get_autodiscover_record(domain):\n \"\"\"Return autodiscover record for domain (if any).\"\"\"\n return _get_simple_record(\"autodiscover.{}\".format(domain))\n\n\nclass DNSSyntaxError(Exception):\n \"\"\"Custom exception for DNS errors.\"\"\"\n\n pass\n\n\ndef check_spf_ip4(value):\n \"\"\"Check syntax of ip4 mechanism.\"\"\"\n parts = value.split(\":\")\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Wrong ip4 mechanism syntax\"))\n try:\n ipaddress.ip_network(parts[1], False)\n except ValueError:\n raise DNSSyntaxError(_(\"Wrong IPv4 address format\"))\n\n\ndef check_spf_ip6(value):\n \"\"\"Check syntax of ip6 mechanism.\"\"\"\n if not value.startswith(\"ip6:\"):\n raise DNSSyntaxError(_(\"Wrong ip6 mechanism syntax\"))\n value = value.replace(\"ip6:\", \"\")\n try:\n ipaddress.ip_network(value, False)\n except ValueError:\n raise DNSSyntaxError(_(\"Wrong IPv6 address format\"))\n\n\ndef _check_domain_and_mask(value, mechanism):\n \"\"\"Check for valid domain / mask.\"\"\"\n domain = None\n mask = None\n if \":\" in value:\n mechanism, domain = value.split(\":\")\n if \"/\" in domain:\n domain, mask = domain.split(\"/\")\n elif \"/\" in value:\n mechanism, mask = value.split(\"/\")\n else:\n raise DNSSyntaxError(\n _(\"Invalid syntax for {} mechanism\").format(mechanism))\n if mask and (not mask.isdigit() or int(mask) > 32):\n raise DNSSyntaxError(_(\"Invalid mask found {}\").format(mask))\n\n\ndef check_spf_a(value):\n \"\"\"Check syntax of a mechanism.\"\"\"\n if value == \"a\":\n return\n _check_domain_and_mask(value, \"a\")\n\n\ndef check_spf_mx(value):\n \"\"\"Check syntax of mx mechanism.\"\"\"\n if value == \"mx\":\n return\n _check_domain_and_mask(value, \"mx\")\n\n\ndef _check_simple(value, mechanism):\n \"\"\"Simple check.\"\"\"\n if value == mechanism:\n return\n parts = value.split(\":\")\n if len(parts) != 2:\n raise DNSSyntaxError(\n _(\"Invalid syntax for {} mechanism\").format(mechanism))\n\n\ndef check_spf_ptr(value):\n \"\"\"Check syntax of ptr mechanism.\"\"\"\n 
_check_simple(value, \"ptr\")\n\n\ndef check_spf_exists(value):\n \"\"\"Check syntax of ptr mechanism.\"\"\"\n _check_simple(value, \"exists\")\n\n\ndef check_spf_include(value):\n \"\"\"Check syntax of include mechanism.\"\"\"\n _check_simple(value, \"include\")\n\n\ndef check_spf_syntax(record):\n \"\"\"Check if record has a valid SPF syntax.\"\"\"\n if not record.startswith(\"v=spf1\"):\n raise DNSSyntaxError(_(\"Not an SPF record\"))\n parts = record.split(\" \")[1:]\n modifiers = []\n mechanisms = []\n for part in parts:\n qualifier = None\n if part[0] in [\"+\", \"-\", \"~\", \"?\"]:\n qualifier = part[0]\n part = part[1:]\n if part == \"all\":\n continue\n for mechanism in constants.SPF_MECHANISMS:\n if part.startswith(mechanism):\n globals()[\"check_spf_{}\".format(mechanism)](part)\n mechanisms.append(mechanism)\n break\n else:\n # Look for modifier\n modifier = part.split(\"=\")\n if len(modifier) != 2:\n raise DNSSyntaxError(_(\"Unknown mechanism {}\").format(part))\n if modifier[0] not in [\"redirect\", \"exp\"]:\n raise DNSSyntaxError(_(\"Unknown modifier {}\").format(\n modifier[0]))\n if modifier[0] in modifiers:\n raise DNSSyntaxError(_(\"Duplicate modifier {} found\").format(\n modifier[0]))\n modifiers.append(modifier[0])\n if not len(mechanisms) and not len(modifiers):\n raise DNSSyntaxError(_(\"No mechanism found\"))\n return None\n\n\ndef check_dkim_syntax(record):\n \"\"\"Check if record has a valid DKIM syntax.\"\"\"\n if not record.startswith(\"v=DKIM1\"):\n raise DNSSyntaxError(_(\"Not a valid DKIM record\"))\n key = None\n for tag in record.split(\";\")[1:]:\n tag = tag.strip(\" \")\n parts = tag.split(\"=\", 1)\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n name = parts[0].strip(\" \")\n if name == \"p\":\n key = \"\".join(part.strip('\"') for part in parts[1].split(\" \"))\n if key is None:\n raise DNSSyntaxError(_(\"No key found in record\"))\n return key\n\n\ndef check_dmarc_tag_string_value(tag, value):\n \"\"\"Check if value is valid for tag.\"\"\"\n tdef = constants.DMARC_TAGS[tag]\n error = _(\"Wrong value {} for tag {}\").format(value, tag)\n if \"values\" in tdef and value not in tdef[\"values\"]:\n raise DNSSyntaxError(error)\n elif \"regex\" in tdef and tdef[\"regex\"].match(value) is None:\n raise DNSSyntaxError(error)\n\n\ndef check_dmarc_tag(tag, value):\n \"\"\"Check if tag is valid.\"\"\"\n tdef = constants.DMARC_TAGS[tag]\n ttype = tdef.get(\"type\", \"string\")\n if ttype == \"list\":\n for svalue in value.split(\",\"):\n check_dmarc_tag_string_value(tag, svalue)\n elif ttype == \"int\":\n error = _(\"Wrong value {} for tag {}:\").format(value, tag)\n try:\n value = int(value)\n except ValueError:\n raise DNSSyntaxError(error + _(\" not an integer\"))\n if \"min_value\" in tdef and value < tdef[\"min_value\"]:\n raise DNSSyntaxError(\n error + _(\" less than {}\").format(tdef[\"min_value\"]))\n if \"max_value\" in tdef and value > tdef[\"max_value\"]:\n raise DNSSyntaxError(\n error + _(\" greater than {}\").format(tdef[\"max_value\"]))\n else:\n check_dmarc_tag_string_value(tag, value)\n\n\ndef check_dmarc_syntax(record):\n \"\"\"Check if record has a valid DMARC syntax.\"\"\"\n if not record.startswith(\"v=DMARC1\"):\n raise DNSSyntaxError(_(\"Not a valid DMARC record\"))\n tags = {}\n for tag in record.split(\";\")[1:]:\n tag = tag.strip(\" \")\n parts = tag.split(\"=\")\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n name = parts[0].strip(\" \")\n if name not in 
constants.DMARC_TAGS:\n raise DNSSyntaxError(_(\"Unknown tag {}\").format(name))\n value = parts[1].strip(\" \")\n check_dmarc_tag(name, value)\n tags[name] = value\n if \"p\" not in tags:\n raise DNSSyntaxError(_(\"Missing required p tag\"))\n", "path": "modoboa/dnstools/lib.py"}]}
| 3,832 | 262 |
gh_patches_debug_38706
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-1991
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Interface's name isn't necessary to be ended with a digit
Remove the step to convert to integer as it doesn't affect the result.
</issue>
<code>
[start of scapy/arch/bpf/core.py]
1 # Guillaume Valadon <[email protected]>
2
3 """
4 Scapy *BSD native support - core
5 """
6
7 from __future__ import absolute_import
8 from scapy.config import conf
9 from scapy.error import Scapy_Exception, warning
10 from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER
11 from scapy.arch.common import get_if, compile_filter
12 from scapy.consts import LOOPBACK_NAME
13
14 from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF
15
16 import os
17 import socket
18 import fcntl
19 import struct
20
21 from ctypes import cdll, cast, pointer
22 from ctypes import c_int, c_ulong, c_char_p
23 from ctypes.util import find_library
24 from scapy.modules.six.moves import range
25
26
27 # ctypes definitions
28
29 LIBC = cdll.LoadLibrary(find_library("libc"))
30 LIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]
31 LIBC.ioctl.restype = c_int
32
33
34 # Addresses manipulation functions
35
36 def get_if_raw_addr(ifname):
37 """Returns the IPv4 address configured on 'ifname', packed with inet_pton.""" # noqa: E501
38
39 # Get ifconfig output
40 try:
41 fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
42 except OSError as msg:
43 warning("Failed to execute ifconfig: (%s)", msg)
44 return b"\0\0\0\0"
45
46 # Get IPv4 addresses
47 addresses = [l for l in fd if l.find("inet ") >= 0]
48 if not addresses:
49 warning("No IPv4 address found on %s !", ifname)
50 return b"\0\0\0\0"
51
52 # Pack the first address
53 address = addresses[0].split(' ')[1]
54 if '/' in address: # NetBSD 8.0
55 address = address.split("/")[0]
56 return socket.inet_pton(socket.AF_INET, address)
57
58
59 def get_if_raw_hwaddr(ifname):
60 """Returns the packed MAC address configured on 'ifname'."""
61
62 NULL_MAC_ADDRESS = b'\x00' * 6
63
64 # Handle the loopback interface separately
65 if ifname == LOOPBACK_NAME:
66 return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)
67
68 # Get ifconfig output
69 try:
70 fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
71 except OSError as msg:
72 raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
73
74 # Get MAC addresses
75 addresses = [l for l in fd.readlines() if l.find("ether") >= 0 or
76 l.find("lladdr") >= 0 or
77 l.find("address") >= 0]
78 if not addresses:
79 raise Scapy_Exception("No MAC address found on %s !" % ifname)
80
81 # Pack and return the MAC address
82 mac = addresses[0].split(' ')[1]
83 mac = [chr(int(b, 16)) for b in mac.split(':')]
84 return (ARPHDR_ETHER, ''.join(mac))
85
86
87 # BPF specific functions
88
89 def get_dev_bpf():
90 """Returns an opened BPF file object"""
91
92 # Get the first available BPF handle
93 for bpf in range(256):
94 try:
95 fd = os.open("/dev/bpf%i" % bpf, os.O_RDWR)
96 return (fd, bpf)
97 except OSError:
98 continue
99
100 raise Scapy_Exception("No /dev/bpf handle is available !")
101
102
103 def attach_filter(fd, bpf_filter, iface):
104 """Attach a BPF filter to the BPF file descriptor"""
105 bp = compile_filter(bpf_filter, iface)
106 # Assign the BPF program to the interface
107 ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))
108 if ret < 0:
109 raise Scapy_Exception("Can't attach the BPF filter !")
110
111
112 # Interface manipulation functions
113
114 def get_if_list():
115 """Returns a list containing all network interfaces."""
116
117 # Get ifconfig output
118 try:
119 fd = os.popen("%s -a" % conf.prog.ifconfig)
120 except OSError as msg:
121 raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
122
123 # Get interfaces
124 interfaces = [line[:line.find(':')] for line in fd.readlines()
125 if ": flags" in line.lower()]
126 return interfaces
127
128
129 def get_working_ifaces():
130 """
131 Returns an ordered list of interfaces that could be used with BPF.
132 Note: the order mimics pcap_findalldevs() behavior
133 """
134
135 # Only root is allowed to perform the following ioctl() call
136 if os.getuid() != 0:
137 return []
138
139 # Test all network interfaces
140 interfaces = []
141 for ifname in get_if_list():
142
143 # Unlike pcap_findalldevs(), we do not care of loopback interfaces.
144 if ifname == LOOPBACK_NAME:
145 continue
146
147 # Get interface flags
148 try:
149 result = get_if(ifname, SIOCGIFFLAGS)
150 except IOError:
151 warning("ioctl(SIOCGIFFLAGS) failed on %s !", ifname)
152 continue
153
154 # Convert flags
155 ifflags = struct.unpack("16xH14x", result)[0]
156 if ifflags & 0x1: # IFF_UP
157
158 # Get a BPF handle
159 fd, _ = get_dev_bpf()
160 if fd is None:
161 raise Scapy_Exception("No /dev/bpf are available !")
162
163 # Check if the interface can be used
164 try:
165 fcntl.ioctl(fd, BIOCSETIF, struct.pack("16s16x", ifname.encode())) # noqa: E501
166 interfaces.append((ifname, int(ifname[-1])))
167 except IOError:
168 pass
169
170 # Close the file descriptor
171 os.close(fd)
172
173 # Sort to mimic pcap_findalldevs() order
174 interfaces.sort(key=lambda elt: elt[1])
175
176 return interfaces
177
178
179 def get_working_if():
180 """Returns the first interface than can be used with BPF"""
181
182 ifaces = get_working_ifaces()
183 if not ifaces:
184 # A better interface will be selected later using the routing table
185 return LOOPBACK_NAME
186 return ifaces[0][0]
187
[end of scapy/arch/bpf/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scapy/arch/bpf/core.py b/scapy/arch/bpf/core.py
--- a/scapy/arch/bpf/core.py
+++ b/scapy/arch/bpf/core.py
@@ -5,22 +5,22 @@
"""
from __future__ import absolute_import
-from scapy.config import conf
-from scapy.error import Scapy_Exception, warning
-from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER
-from scapy.arch.common import get_if, compile_filter
-from scapy.consts import LOOPBACK_NAME
-
-from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF
+from ctypes import cdll, cast, pointer
+from ctypes import c_int, c_ulong, c_char_p
+from ctypes.util import find_library
+import fcntl
import os
+import re
import socket
-import fcntl
import struct
-from ctypes import cdll, cast, pointer
-from ctypes import c_int, c_ulong, c_char_p
-from ctypes.util import find_library
+from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF
+from scapy.arch.common import get_if, compile_filter
+from scapy.config import conf
+from scapy.consts import LOOPBACK_NAME
+from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER
+from scapy.error import Scapy_Exception, warning
from scapy.modules.six.moves import range
@@ -126,6 +126,9 @@
return interfaces
+_IFNUM = re.compile("([0-9]*)([ab]?)$")
+
+
def get_working_ifaces():
"""
Returns an ordered list of interfaces that could be used with BPF.
@@ -156,24 +159,27 @@
if ifflags & 0x1: # IFF_UP
# Get a BPF handle
- fd, _ = get_dev_bpf()
+ fd = get_dev_bpf()[0]
if fd is None:
raise Scapy_Exception("No /dev/bpf are available !")
# Check if the interface can be used
try:
- fcntl.ioctl(fd, BIOCSETIF, struct.pack("16s16x", ifname.encode())) # noqa: E501
- interfaces.append((ifname, int(ifname[-1])))
+ fcntl.ioctl(fd, BIOCSETIF, struct.pack("16s16x",
+ ifname.encode()))
except IOError:
pass
-
- # Close the file descriptor
- os.close(fd)
+ else:
+ ifnum, ifab = _IFNUM.search(ifname).groups()
+ interfaces.append((ifname, int(ifnum) if ifnum else -1, ifab))
+ finally:
+ # Close the file descriptor
+ os.close(fd)
# Sort to mimic pcap_findalldevs() order
- interfaces.sort(key=lambda elt: elt[1])
+ interfaces.sort(key=lambda elt: (elt[1], elt[2], elt[0]))
- return interfaces
+ return [iface[0] for iface in interfaces]
def get_working_if():
@@ -183,4 +189,4 @@
if not ifaces:
# A better interface will be selected later using the routing table
return LOOPBACK_NAME
- return ifaces[0][0]
+ return ifaces[0]
|
{"golden_diff": "diff --git a/scapy/arch/bpf/core.py b/scapy/arch/bpf/core.py\n--- a/scapy/arch/bpf/core.py\n+++ b/scapy/arch/bpf/core.py\n@@ -5,22 +5,22 @@\n \"\"\"\n \n from __future__ import absolute_import\n-from scapy.config import conf\n-from scapy.error import Scapy_Exception, warning\n-from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\n-from scapy.arch.common import get_if, compile_filter\n-from scapy.consts import LOOPBACK_NAME\n-\n-from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF\n \n+from ctypes import cdll, cast, pointer\n+from ctypes import c_int, c_ulong, c_char_p\n+from ctypes.util import find_library\n+import fcntl\n import os\n+import re\n import socket\n-import fcntl\n import struct\n \n-from ctypes import cdll, cast, pointer\n-from ctypes import c_int, c_ulong, c_char_p\n-from ctypes.util import find_library\n+from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF\n+from scapy.arch.common import get_if, compile_filter\n+from scapy.config import conf\n+from scapy.consts import LOOPBACK_NAME\n+from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\n+from scapy.error import Scapy_Exception, warning\n from scapy.modules.six.moves import range\n \n \n@@ -126,6 +126,9 @@\n return interfaces\n \n \n+_IFNUM = re.compile(\"([0-9]*)([ab]?)$\")\n+\n+\n def get_working_ifaces():\n \"\"\"\n Returns an ordered list of interfaces that could be used with BPF.\n@@ -156,24 +159,27 @@\n if ifflags & 0x1: # IFF_UP\n \n # Get a BPF handle\n- fd, _ = get_dev_bpf()\n+ fd = get_dev_bpf()[0]\n if fd is None:\n raise Scapy_Exception(\"No /dev/bpf are available !\")\n \n # Check if the interface can be used\n try:\n- fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\", ifname.encode())) # noqa: E501\n- interfaces.append((ifname, int(ifname[-1])))\n+ fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\",\n+ ifname.encode()))\n except IOError:\n pass\n-\n- # Close the file descriptor\n- os.close(fd)\n+ else:\n+ ifnum, ifab = _IFNUM.search(ifname).groups()\n+ interfaces.append((ifname, int(ifnum) if ifnum else -1, ifab))\n+ finally:\n+ # Close the file descriptor\n+ os.close(fd)\n \n # Sort to mimic pcap_findalldevs() order\n- interfaces.sort(key=lambda elt: elt[1])\n+ interfaces.sort(key=lambda elt: (elt[1], elt[2], elt[0]))\n \n- return interfaces\n+ return [iface[0] for iface in interfaces]\n \n \n def get_working_if():\n@@ -183,4 +189,4 @@\n if not ifaces:\n # A better interface will be selected later using the routing table\n return LOOPBACK_NAME\n- return ifaces[0][0]\n+ return ifaces[0]\n", "issue": "Interface's name isn't necessary to be ended with a digit\nRemove the step to convert to integer as it doesn't affect the result.\n", "before_files": [{"content": "# Guillaume Valadon <[email protected]>\n\n\"\"\"\nScapy *BSD native support - core\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\nfrom scapy.arch.common import get_if, compile_filter\nfrom scapy.consts import LOOPBACK_NAME\n\nfrom scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF\n\nimport os\nimport socket\nimport fcntl\nimport struct\n\nfrom ctypes import cdll, cast, pointer\nfrom ctypes import c_int, c_ulong, c_char_p\nfrom ctypes.util import find_library\nfrom scapy.modules.six.moves import range\n\n\n# ctypes definitions\n\nLIBC = cdll.LoadLibrary(find_library(\"libc\"))\nLIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]\nLIBC.ioctl.restype = c_int\n\n\n# 
Addresses manipulation functions\n\ndef get_if_raw_addr(ifname):\n \"\"\"Returns the IPv4 address configured on 'ifname', packed with inet_pton.\"\"\" # noqa: E501\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n warning(\"Failed to execute ifconfig: (%s)\", msg)\n return b\"\\0\\0\\0\\0\"\n\n # Get IPv4 addresses\n addresses = [l for l in fd if l.find(\"inet \") >= 0]\n if not addresses:\n warning(\"No IPv4 address found on %s !\", ifname)\n return b\"\\0\\0\\0\\0\"\n\n # Pack the first address\n address = addresses[0].split(' ')[1]\n if '/' in address: # NetBSD 8.0\n address = address.split(\"/\")[0]\n return socket.inet_pton(socket.AF_INET, address)\n\n\ndef get_if_raw_hwaddr(ifname):\n \"\"\"Returns the packed MAC address configured on 'ifname'.\"\"\"\n\n NULL_MAC_ADDRESS = b'\\x00' * 6\n\n # Handle the loopback interface separately\n if ifname == LOOPBACK_NAME:\n return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get MAC addresses\n addresses = [l for l in fd.readlines() if l.find(\"ether\") >= 0 or\n l.find(\"lladdr\") >= 0 or\n l.find(\"address\") >= 0]\n if not addresses:\n raise Scapy_Exception(\"No MAC address found on %s !\" % ifname)\n\n # Pack and return the MAC address\n mac = addresses[0].split(' ')[1]\n mac = [chr(int(b, 16)) for b in mac.split(':')]\n return (ARPHDR_ETHER, ''.join(mac))\n\n\n# BPF specific functions\n\ndef get_dev_bpf():\n \"\"\"Returns an opened BPF file object\"\"\"\n\n # Get the first available BPF handle\n for bpf in range(256):\n try:\n fd = os.open(\"/dev/bpf%i\" % bpf, os.O_RDWR)\n return (fd, bpf)\n except OSError:\n continue\n\n raise Scapy_Exception(\"No /dev/bpf handle is available !\")\n\n\ndef attach_filter(fd, bpf_filter, iface):\n \"\"\"Attach a BPF filter to the BPF file descriptor\"\"\"\n bp = compile_filter(bpf_filter, iface)\n # Assign the BPF program to the interface\n ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))\n if ret < 0:\n raise Scapy_Exception(\"Can't attach the BPF filter !\")\n\n\n# Interface manipulation functions\n\ndef get_if_list():\n \"\"\"Returns a list containing all network interfaces.\"\"\"\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s -a\" % conf.prog.ifconfig)\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get interfaces\n interfaces = [line[:line.find(':')] for line in fd.readlines()\n if \": flags\" in line.lower()]\n return interfaces\n\n\ndef get_working_ifaces():\n \"\"\"\n Returns an ordered list of interfaces that could be used with BPF.\n Note: the order mimics pcap_findalldevs() behavior\n \"\"\"\n\n # Only root is allowed to perform the following ioctl() call\n if os.getuid() != 0:\n return []\n\n # Test all network interfaces\n interfaces = []\n for ifname in get_if_list():\n\n # Unlike pcap_findalldevs(), we do not care of loopback interfaces.\n if ifname == LOOPBACK_NAME:\n continue\n\n # Get interface flags\n try:\n result = get_if(ifname, SIOCGIFFLAGS)\n except IOError:\n warning(\"ioctl(SIOCGIFFLAGS) failed on %s !\", ifname)\n continue\n\n # Convert flags\n ifflags = struct.unpack(\"16xH14x\", result)[0]\n if ifflags & 0x1: # IFF_UP\n\n # Get a BPF handle\n fd, _ = get_dev_bpf()\n if fd is None:\n raise Scapy_Exception(\"No /dev/bpf are available !\")\n\n # Check if the 
interface can be used\n try:\n fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\", ifname.encode())) # noqa: E501\n interfaces.append((ifname, int(ifname[-1])))\n except IOError:\n pass\n\n # Close the file descriptor\n os.close(fd)\n\n # Sort to mimic pcap_findalldevs() order\n interfaces.sort(key=lambda elt: elt[1])\n\n return interfaces\n\n\ndef get_working_if():\n \"\"\"Returns the first interface than can be used with BPF\"\"\"\n\n ifaces = get_working_ifaces()\n if not ifaces:\n # A better interface will be selected later using the routing table\n return LOOPBACK_NAME\n return ifaces[0][0]\n", "path": "scapy/arch/bpf/core.py"}]}
| 2,466 | 782 |
gh_patches_debug_30616
|
rasdani/github-patches
|
git_diff
|
rotki__rotki-897
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing fiat currency does not change manually tracked balances value column title
## Problem Definition
Seen in v1.4.0 by @cryptomole1
If the user has manually tracked balances, changing the FIAT currency via the drop-down menu changes the value but not the title of the column. The title of the column stays stuck at `USD Value`.
I can confirm the problem, just tested it.
## Task
- Fix it
- Write a unit test
</issue>
<code>
[start of rotkehlchen/balances/manual.py]
1 from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
2
3 from rotkehlchen.assets.asset import Asset
4 from rotkehlchen.errors import InputError
5 from rotkehlchen.fval import FVal
6 from rotkehlchen.inquirer import Inquirer
7 from rotkehlchen.typing import Location
8
9 if TYPE_CHECKING:
10 from rotkehlchen.db.dbhandler import DBHandler
11
12
13 class ManuallyTrackedBalance(NamedTuple):
14 asset: Asset
15 label: str
16 amount: FVal
17 location: Location
18 tags: Optional[List[str]]
19
20
21 class ManuallyTrackedBalanceWithValue(NamedTuple):
22 # NamedTuples can't use inheritance. Make sure this has same fields as
23 # ManuallyTrackedBalance until usd_value
24 asset: Asset
25 label: str
26 amount: FVal
27 location: Location
28 tags: Optional[List[str]]
29 usd_value: FVal
30
31
32 def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:
33 """Gets the manually tracked balances
34
35 May raise:
36 - RemoteError if there is a problem querying for the current price of an asset
37 """
38 balances = db.get_manually_tracked_balances()
39 balances_with_value = []
40 for entry in balances:
41 price = Inquirer().find_usd_price(entry.asset)
42 # https://github.com/python/mypy/issues/2582 --> for the type ignore below
43 balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore
44 **entry._asdict(),
45 usd_value=price * entry.amount,
46 ))
47
48 return balances_with_value
49
50
51 def add_manually_tracked_balances(
52 db: 'DBHandler',
53 data: List[ManuallyTrackedBalance],
54 ) -> None:
55 """Adds manually tracked balances
56
57 May raise:
58 - InputError if any of the given balance entry labels already exist in the DB
59 - TagConstraintError if any of the given manually tracked balances contain unknown tags.
60 """
61 if len(data) == 0:
62 raise InputError('Empty list of manually tracked balances to add was given')
63 db.ensure_tags_exist(
64 given_data=data,
65 action='adding',
66 data_type='manually tracked balances',
67 )
68 db.add_manually_tracked_balances(data=data)
69
70
71 def edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:
72 """Edits manually tracked balances
73
74 May raise:
75 - InputError if the given balances list is empty or if
76 any of the balance entry labels to edit do not exist in the DB.
77 - TagConstraintError if any of the given balance data contain unknown tags.
78 """
79 if len(data) == 0:
80 raise InputError('Empty list of manually tracked balances to edit was given')
81 db.ensure_tags_exist(
82 given_data=data,
83 action='editing',
84 data_type='manually tracked balances',
85 )
86 db.edit_manually_tracked_balances(data)
87
88
89 def account_for_manually_tracked_balances(
90 db: 'DBHandler',
91 balances: Dict[str, Any],
92 ) -> Dict[str, Any]:
93 """Given the big balances mapping adds to it all manually tracked balances"""
94 manually_tracked_balances = get_manually_tracked_balances(db)
95 for m_entry in manually_tracked_balances:
96 location_str = str(m_entry.location)
97 if location_str not in balances:
98 balances[location_str] = {}
99 balances[location_str][m_entry.asset.identifier] = {
100 'amount': m_entry.amount,
101 'usd_value': m_entry.usd_value,
102 }
103 else:
104 if m_entry.asset.identifier not in balances[location_str]:
105 balances[location_str][m_entry.asset.identifier] = {
106 'amount': m_entry.amount,
107 'usd_value': m_entry.usd_value,
108 }
109 else:
110 old_amount = balances[location_str][m_entry.asset.identifier]['amount']
111 old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']
112 balances[location_str][m_entry.asset.identifier] = {
113 'amount': old_amount + m_entry.amount,
114 'usd_value': old_usd_value + m_entry.usd_value,
115 }
116
117 return balances
118
[end of rotkehlchen/balances/manual.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rotkehlchen/balances/manual.py b/rotkehlchen/balances/manual.py
--- a/rotkehlchen/balances/manual.py
+++ b/rotkehlchen/balances/manual.py
@@ -1,10 +1,11 @@
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
from rotkehlchen.assets.asset import Asset
-from rotkehlchen.errors import InputError
+from rotkehlchen.constants.misc import ZERO
+from rotkehlchen.errors import InputError, RemoteError
from rotkehlchen.fval import FVal
from rotkehlchen.inquirer import Inquirer
-from rotkehlchen.typing import Location
+from rotkehlchen.typing import Location, Price
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
@@ -30,15 +31,18 @@
def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:
- """Gets the manually tracked balances
-
- May raise:
- - RemoteError if there is a problem querying for the current price of an asset
- """
+ """Gets the manually tracked balances"""
balances = db.get_manually_tracked_balances()
balances_with_value = []
for entry in balances:
- price = Inquirer().find_usd_price(entry.asset)
+ try:
+ price = Inquirer().find_usd_price(entry.asset)
+ except RemoteError as e:
+ db.msg_aggregator.add_warning(
+ f'Could not find price for {entry.asset.identifier} during '
+ f'manually tracked balance querying due to {str(e)}',
+ )
+ price = Price(ZERO)
# https://github.com/python/mypy/issues/2582 --> for the type ignore below
balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore
**entry._asdict(),
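
The accepted diff hardens the price lookup rather than the column title: a `RemoteError` from the price oracle now degrades to a warning plus a zero price instead of propagating. Below is a minimal, self-contained sketch of that fallback pattern; `find_usd_price` mirrors the Inquirer call in the diff, while `RemoteError`, `StubInquirer`, and the `warnings` list are hypothetical stand-ins added only so the snippet runs outside rotki.

```python
from decimal import Decimal


class RemoteError(Exception):
    """Stand-in for rotki's RemoteError (name taken from the diff, class is a stub)."""


class StubInquirer:
    """Hypothetical price oracle that fails for an unknown asset."""

    def find_usd_price(self, asset: str) -> Decimal:
        if asset == "UNKNOWN":
            raise RemoteError(f"no oracle knows {asset}")
        return Decimal("1.5")


def priced_amount(inquirer, asset: str, amount: Decimal, warnings: list) -> Decimal:
    # Same shape as the diff: fall back to a zero price and record a warning
    # instead of letting RemoteError abort the whole balance query.
    try:
        price = inquirer.find_usd_price(asset)
    except RemoteError as err:
        warnings.append(f"Could not find price for {asset}: {err}")
        price = Decimal(0)
    return price * amount


if __name__ == "__main__":
    warnings: list = []
    print(priced_amount(StubInquirer(), "ETH", Decimal("2"), warnings))      # 3.0
    print(priced_amount(StubInquirer(), "UNKNOWN", Decimal("2"), warnings))  # 0
    print(warnings)
```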
|
{"golden_diff": "diff --git a/rotkehlchen/balances/manual.py b/rotkehlchen/balances/manual.py\n--- a/rotkehlchen/balances/manual.py\n+++ b/rotkehlchen/balances/manual.py\n@@ -1,10 +1,11 @@\n from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n \n from rotkehlchen.assets.asset import Asset\n-from rotkehlchen.errors import InputError\n+from rotkehlchen.constants.misc import ZERO\n+from rotkehlchen.errors import InputError, RemoteError\n from rotkehlchen.fval import FVal\n from rotkehlchen.inquirer import Inquirer\n-from rotkehlchen.typing import Location\n+from rotkehlchen.typing import Location, Price\n \n if TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n@@ -30,15 +31,18 @@\n \n \n def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n- \"\"\"Gets the manually tracked balances\n-\n- May raise:\n- - RemoteError if there is a problem querying for the current price of an asset\n- \"\"\"\n+ \"\"\"Gets the manually tracked balances\"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n- price = Inquirer().find_usd_price(entry.asset)\n+ try:\n+ price = Inquirer().find_usd_price(entry.asset)\n+ except RemoteError as e:\n+ db.msg_aggregator.add_warning(\n+ f'Could not find price for {entry.asset.identifier} during '\n+ f'manually tracked balance querying due to {str(e)}',\n+ )\n+ price = Price(ZERO)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n", "issue": "Changing fiat currency does not change manually tracked balances value column title\n## Problem Definition\r\n\r\nSeen in v1.4.0 by @cryptomole1\r\n\r\nChanging the FIAT currency via the drop-down menu if the user has manually tracked balances changes the value but not the title of the column. The title of the column is stuck to being `USD Value`.\r\n\r\nI can confirm the problem, just tested it.\r\n\r\n## Task\r\n\r\n- Fix it\r\n- Write a unit test\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.errors import InputError\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.typing import Location\n\nif TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n\n\nclass ManuallyTrackedBalance(NamedTuple):\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n\n\nclass ManuallyTrackedBalanceWithValue(NamedTuple):\n # NamedTuples can't use inheritance. 
Make sure this has same fields as\n # ManuallyTrackedBalance until usd_value\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n usd_value: FVal\n\n\ndef get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n \"\"\"Gets the manually tracked balances\n\n May raise:\n - RemoteError if there is a problem querying for the current price of an asset\n \"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n price = Inquirer().find_usd_price(entry.asset)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n usd_value=price * entry.amount,\n ))\n\n return balances_with_value\n\n\ndef add_manually_tracked_balances(\n db: 'DBHandler',\n data: List[ManuallyTrackedBalance],\n) -> None:\n \"\"\"Adds manually tracked balances\n\n May raise:\n - InputError if any of the given balance entry labels already exist in the DB\n - TagConstraintError if any of the given manually tracked balances contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to add was given')\n db.ensure_tags_exist(\n given_data=data,\n action='adding',\n data_type='manually tracked balances',\n )\n db.add_manually_tracked_balances(data=data)\n\n\ndef edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:\n \"\"\"Edits manually tracked balances\n\n May raise:\n - InputError if the given balances list is empty or if\n any of the balance entry labels to edit do not exist in the DB.\n - TagConstraintError if any of the given balance data contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to edit was given')\n db.ensure_tags_exist(\n given_data=data,\n action='editing',\n data_type='manually tracked balances',\n )\n db.edit_manually_tracked_balances(data)\n\n\ndef account_for_manually_tracked_balances(\n db: 'DBHandler',\n balances: Dict[str, Any],\n) -> Dict[str, Any]:\n \"\"\"Given the big balances mapping adds to it all manually tracked balances\"\"\"\n manually_tracked_balances = get_manually_tracked_balances(db)\n for m_entry in manually_tracked_balances:\n location_str = str(m_entry.location)\n if location_str not in balances:\n balances[location_str] = {}\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n if m_entry.asset.identifier not in balances[location_str]:\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n old_amount = balances[location_str][m_entry.asset.identifier]['amount']\n old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': old_amount + m_entry.amount,\n 'usd_value': old_usd_value + m_entry.usd_value,\n }\n\n return balances\n", "path": "rotkehlchen/balances/manual.py"}]}
| 1,831 | 439 |
gh_patches_debug_31127
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-290
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bring back the list of orders in the admin
As I recall, we used to have a list of orders in the admin, but it has been broken and removed several times. Bring it back.
</issue>
<code>
[start of shopelectro/admin.py]
1 from django.contrib import admin
2 from django.contrib.admin.widgets import FilteredSelectMultiple
3 from django.contrib.redirects.models import Redirect
4 from django.db import models as django_models
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.utils.translation import ugettext_lazy as _
8
9 from django_select2.forms import ModelSelect2Widget
10
11 from pages.models import CustomPage, FlatPage, PageTemplate
12 from generic_admin import inlines, models, sites
13
14 from shopelectro import models as se_models
15 from shopelectro.views.admin import TableEditor
16
17
18 class SEAdminSite(sites.SiteWithTableEditor):
19
20 site_header = 'Shopelectro administration'
21 table_editor_view = TableEditor
22
23
24 def prepare_has_filter_queryset(value, db_table, queryset):
25 if not value:
26 return
27
28 query = '{}__tags__isnull'.format(db_table)
29
30 # Use brackets, because `Explicit is better than implicit`.
31 return queryset.filter(**{query: value != 'yes'})
32
33
34 class HasTagsFilter(admin.SimpleListFilter):
35
36 product_model = se_models.Product
37 title = _('has tags')
38 parameter_name = 'has_tags'
39
40 def lookups(self, request, model_admin):
41 return (
42 ('yes', _('Has tags')),
43 ('no', _('Has no tags')),
44 )
45
46 def queryset(self, request, queryset):
47 return prepare_has_filter_queryset(
48 self.value(),
49 self.product_model._meta.db_table,
50 queryset
51 )
52
53
54 class HasCategoryFilter(admin.SimpleListFilter):
55
56 product_model = se_models.Product
57 title = _('has category')
58 parameter_name = 'has_category'
59
60 def lookups(self, request, model_admin):
61 return (
62 ('yes', _('Has category')),
63 ('no', _('Has no category')),
64 )
65
66 def queryset(self, request, queryset):
67 return prepare_has_filter_queryset(
68 self.value(),
69 self.product_model._meta.db_table,
70 queryset
71 )
72
73
74 class TagInline(admin.StackedInline):
75
76 model = se_models.Tag
77 extra = 0
78
79
80 class CategoryInline(inlines.CategoryInline):
81
82 model = se_models.Category
83
84 fieldsets = ((None, {
85 'classes': ('primary-chars', ),
86 'fields': (
87 ('name', 'id'),
88 ('parent', 'correct_parent_id'),
89 )
90 }),)
91
92
93 class ProductInline(inlines.ProductInline):
94
95 model = se_models.Product
96
97 formfield_overrides = {
98 django_models.ManyToManyField: {
99 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)
100 },
101 }
102
103 fieldsets = ((None, {
104 'classes': ('primary-chars', ),
105 'fields': (
106 ('name', 'id'),
107 ('category', 'correct_category_id'),
108 ('price', 'in_stock'),
109 'is_popular',
110 ('purchase_price', 'wholesale_small'),
111 ('wholesale_medium', 'wholesale_large'),
112 'tags',
113 )
114 }),)
115
116 def formfield_for_dbfield(self, db_field, **kwargs):
117 if db_field.name == 'category':
118 kwargs['widget'] = ModelSelect2Widget(
119 model=se_models.Category,
120 search_fields=[
121 'name__icontains',
122 'pk__startswith',
123 ],
124 )
125 return super(ProductInline, self).formfield_for_dbfield(
126 db_field,
127 **kwargs,
128 )
129
130
131 class CategoryPageAdmin(models.CategoryPageAdmin):
132
133 add = False
134 delete = False
135 inlines = [CategoryInline, inlines.ImageInline]
136
137 def get_queryset(self, request):
138 return (
139 super(CategoryPageAdmin, self)
140 .get_queryset(request)
141 .select_related('shopelectro_category')
142 )
143
144
145 class ProductPageAdmin(models.ProductPageAdmin):
146
147 add = False
148 delete = False
149 category_page_model = se_models.CategoryPage
150 list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]
151 inlines = [ProductInline, inlines.ImageInline]
152 search_fields = [
153 'shopelectro_product__vendor_code', 'name', 'slug',
154 ]
155
156 def model_id(self, obj):
157 return obj.model.vendor_code
158
159 model_id.short_description = _('Vendor code')
160 model_id.admin_order_field = 'shopelectro_product__vendor_code'
161
162 def get_queryset(self, request):
163 return (
164 super(ProductPageAdmin, self)
165 .get_queryset(request)
166 .select_related('shopelectro_product')
167 )
168
169
170 class ProductFeedbackPageAdmin(admin.ModelAdmin):
171
172 add = False
173 delete = False
174 list_filter = ['rating']
175 list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']
176
177 def links(self, obj):
178 return format_html(
179 '''
180 <a href="{url}" class="field-link" title="Посмотреть на сайте" target="_blank">
181 <i class="fa fa-link" aria-hidden="true"></i>
182 </a>
183 '''.format(url=obj.product.url))
184
185 links.short_description = _('Link')
186
187 def get_queryset(self, request):
188 return (
189 super(ProductFeedbackPageAdmin, self)
190 .get_queryset(request)
191 .select_related('product')
192 )
193
194
195 class TagGroupAdmin(admin.ModelAdmin):
196
197 list_display = ['id', 'name', 'position', 'count_tags']
198 list_display_links = ['name']
199
200 inlines = [TagInline]
201
202 def get_queryset(self, request):
203 return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')
204
205 def count_tags(self, obj):
206 return obj.tags.count()
207
208
209 class TagAdmin(admin.ModelAdmin):
210
211 search_fields = ['id', 'name']
212 list_display = ['id', 'name', 'position', 'custom_group']
213 list_display_links = ['name']
214
215 def get_queryset(self, request):
216 return super(TagAdmin, self).get_queryset(request).prefetch_related('group')
217
218 def custom_group(self, obj):
219 group = obj.group
220
221 return format_html(
222 '<a href="{url}">{group}</a>',
223 group=group,
224 url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))
225 )
226
227 custom_group.admin_order_field = 'group'
228 custom_group.short_description = _('Group')
229
230
231 se_admin = SEAdminSite(name='se_admin')
232 se_admin.register(CustomPage, models.CustomPageAdmin)
233 se_admin.register(FlatPage, models.FlatPageAdmin)
234 se_admin.register(PageTemplate, models.CustomPageTemplateAdmin)
235
236 se_admin.register(se_models.CategoryPage, CategoryPageAdmin)
237 se_admin.register(se_models.ProductPage, ProductPageAdmin)
238 se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)
239 se_admin.register(se_models.TagGroup, TagGroupAdmin)
240 se_admin.register(se_models.Tag, TagAdmin)
241
242 se_admin.register(Redirect)
243
[end of shopelectro/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/shopelectro/admin.py b/shopelectro/admin.py
--- a/shopelectro/admin.py
+++ b/shopelectro/admin.py
@@ -8,8 +8,9 @@
from django_select2.forms import ModelSelect2Widget
+from ecommerce.models import Position
from pages.models import CustomPage, FlatPage, PageTemplate
-from generic_admin import inlines, models, sites
+from generic_admin import inlines, mixins, models, sites
from shopelectro import models as se_models
from shopelectro.views.admin import TableEditor
@@ -128,6 +129,11 @@
)
+class PositionInline(admin.StackedInline):
+
+ model = Position
+
+
class CategoryPageAdmin(models.CategoryPageAdmin):
add = False
@@ -228,12 +234,21 @@
custom_group.short_description = _('Group')
+class OrderAdmin(mixins.PermissionsControl):
+
+ add = False
+ inlines = [PositionInline]
+ list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']
+ search_fields = ['name', 'email', 'phone']
+ list_display_links = ['name']
+
se_admin = SEAdminSite(name='se_admin')
se_admin.register(CustomPage, models.CustomPageAdmin)
se_admin.register(FlatPage, models.FlatPageAdmin)
se_admin.register(PageTemplate, models.CustomPageTemplateAdmin)
se_admin.register(se_models.CategoryPage, CategoryPageAdmin)
+se_admin.register(se_models.Order, OrderAdmin)
se_admin.register(se_models.ProductPage, ProductPageAdmin)
se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)
se_admin.register(se_models.TagGroup, TagGroupAdmin)
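
Reduced to a sketch, the admin change is a stacked inline of order positions plus a searchable order changelist. The snippet below assumes a configured Django project with the `Order` and `Position` models named in the diff, and it substitutes a plain `ModelAdmin` for the project's `PermissionsControl` mixin, so treat it as an illustration rather than the repository's exact code.

```python
from django.contrib import admin

from ecommerce.models import Position        # import path taken from the accepted diff
from shopelectro import models as se_models  # Order is referenced as se_models.Order


class PositionInline(admin.StackedInline):
    """One stacked block per purchased position on the order change page."""

    model = Position


class OrderAdmin(admin.ModelAdmin):
    """Columns and search fields copied from the accepted diff.

    The project version also mixes in generic_admin.mixins.PermissionsControl
    (which sets add = False); a plain ModelAdmin keeps this sketch generic.
    """

    inlines = [PositionInline]
    list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']
    search_fields = ['name', 'email', 'phone']
    list_display_links = ['name']


# shopelectro registers this on its custom SEAdminSite instance:
#   se_admin.register(se_models.Order, OrderAdmin)
# on a stock project the equivalent would be:
admin.site.register(se_models.Order, OrderAdmin)
```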
|
{"golden_diff": "diff --git a/shopelectro/admin.py b/shopelectro/admin.py\n--- a/shopelectro/admin.py\n+++ b/shopelectro/admin.py\n@@ -8,8 +8,9 @@\n \n from django_select2.forms import ModelSelect2Widget\n \n+from ecommerce.models import Position\n from pages.models import CustomPage, FlatPage, PageTemplate\n-from generic_admin import inlines, models, sites\n+from generic_admin import inlines, mixins, models, sites\n \n from shopelectro import models as se_models\n from shopelectro.views.admin import TableEditor\n@@ -128,6 +129,11 @@\n )\n \n \n+class PositionInline(admin.StackedInline):\n+\n+ model = Position\n+\n+\n class CategoryPageAdmin(models.CategoryPageAdmin):\n \n add = False\n@@ -228,12 +234,21 @@\n custom_group.short_description = _('Group')\n \n \n+class OrderAdmin(mixins.PermissionsControl):\n+\n+ add = False\n+ inlines = [PositionInline]\n+ list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']\n+ search_fields = ['name', 'email', 'phone']\n+ list_display_links = ['name']\n+\n se_admin = SEAdminSite(name='se_admin')\n se_admin.register(CustomPage, models.CustomPageAdmin)\n se_admin.register(FlatPage, models.FlatPageAdmin)\n se_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n \n se_admin.register(se_models.CategoryPage, CategoryPageAdmin)\n+se_admin.register(se_models.Order, OrderAdmin)\n se_admin.register(se_models.ProductPage, ProductPageAdmin)\n se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\n se_admin.register(se_models.TagGroup, TagGroupAdmin)\n", "issue": "\u0412\u0435\u0440\u043d\u0438 \u0441\u043f\u0438\u0441\u043e\u043a \u0437\u0430\u043a\u0430\u0437\u043e\u0432 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443\n\u041f\u043e\u043c\u043d\u0438\u0442\u0441\u044f \u0443 \u043d\u0430\u0441 \u0431\u044b\u043b \u0441\u043f\u0438\u0441\u043e\u043a \u0437\u0430\u043a\u0430\u0437\u043e\u0432 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0435, \u043d\u043e \u0435\u0433\u043e \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0440\u0430\u0437 \u043b\u043e\u043c\u0430\u043b\u0438 \u0438 \u0432\u044b\u043f\u0438\u043b\u0438\u0432\u0430\u043b\u0438. 
\u0412\u043e\u0437\u0432\u0440\u0430\u0449\u0430\u0439.\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.redirects.models import Redirect\nfrom django.db import models as django_models\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_select2.forms import ModelSelect2Widget\n\nfrom pages.models import CustomPage, FlatPage, PageTemplate\nfrom generic_admin import inlines, models, sites\n\nfrom shopelectro import models as se_models\nfrom shopelectro.views.admin import TableEditor\n\n\nclass SEAdminSite(sites.SiteWithTableEditor):\n\n site_header = 'Shopelectro administration'\n table_editor_view = TableEditor\n\n\ndef prepare_has_filter_queryset(value, db_table, queryset):\n if not value:\n return\n\n query = '{}__tags__isnull'.format(db_table)\n\n # Use brackets, because `Explicit is better than implicit`.\n return queryset.filter(**{query: value != 'yes'})\n\n\nclass HasTagsFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has tags')\n parameter_name = 'has_tags'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has tags')),\n ('no', _('Has no tags')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass HasCategoryFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has category')\n parameter_name = 'has_category'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has category')),\n ('no', _('Has no category')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass TagInline(admin.StackedInline):\n\n model = se_models.Tag\n extra = 0\n\n\nclass CategoryInline(inlines.CategoryInline):\n\n model = se_models.Category\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('parent', 'correct_parent_id'),\n )\n }),)\n\n\nclass ProductInline(inlines.ProductInline):\n\n model = se_models.Product\n\n formfield_overrides = {\n django_models.ManyToManyField: {\n 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)\n },\n }\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('category', 'correct_category_id'),\n ('price', 'in_stock'),\n 'is_popular',\n ('purchase_price', 'wholesale_small'),\n ('wholesale_medium', 'wholesale_large'),\n 'tags',\n )\n }),)\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'category':\n kwargs['widget'] = ModelSelect2Widget(\n model=se_models.Category,\n search_fields=[\n 'name__icontains',\n 'pk__startswith',\n ],\n )\n return super(ProductInline, self).formfield_for_dbfield(\n db_field,\n **kwargs,\n )\n\n\nclass CategoryPageAdmin(models.CategoryPageAdmin):\n\n add = False\n delete = False\n inlines = [CategoryInline, inlines.ImageInline]\n\n def get_queryset(self, request):\n return (\n super(CategoryPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_category')\n )\n\n\nclass ProductPageAdmin(models.ProductPageAdmin):\n\n add = False\n delete = False\n category_page_model = se_models.CategoryPage\n list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]\n inlines = 
[ProductInline, inlines.ImageInline]\n search_fields = [\n 'shopelectro_product__vendor_code', 'name', 'slug',\n ]\n\n def model_id(self, obj):\n return obj.model.vendor_code\n\n model_id.short_description = _('Vendor code')\n model_id.admin_order_field = 'shopelectro_product__vendor_code'\n\n def get_queryset(self, request):\n return (\n super(ProductPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_product')\n )\n\n\nclass ProductFeedbackPageAdmin(admin.ModelAdmin):\n\n add = False\n delete = False\n list_filter = ['rating']\n list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']\n\n def links(self, obj):\n return format_html(\n '''\n <a href=\"{url}\" class=\"field-link\" title=\"\u041f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\" target=\"_blank\">\n <i class=\"fa fa-link\" aria-hidden=\"true\"></i>\n </a>\n '''.format(url=obj.product.url))\n\n links.short_description = _('Link')\n\n def get_queryset(self, request):\n return (\n super(ProductFeedbackPageAdmin, self)\n .get_queryset(request)\n .select_related('product')\n )\n\n\nclass TagGroupAdmin(admin.ModelAdmin):\n\n list_display = ['id', 'name', 'position', 'count_tags']\n list_display_links = ['name']\n\n inlines = [TagInline]\n\n def get_queryset(self, request):\n return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')\n\n def count_tags(self, obj):\n return obj.tags.count()\n\n\nclass TagAdmin(admin.ModelAdmin):\n\n search_fields = ['id', 'name']\n list_display = ['id', 'name', 'position', 'custom_group']\n list_display_links = ['name']\n\n def get_queryset(self, request):\n return super(TagAdmin, self).get_queryset(request).prefetch_related('group')\n\n def custom_group(self, obj):\n group = obj.group\n\n return format_html(\n '<a href=\"{url}\">{group}</a>',\n group=group,\n url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))\n )\n\n custom_group.admin_order_field = 'group'\n custom_group.short_description = _('Group')\n\n\nse_admin = SEAdminSite(name='se_admin')\nse_admin.register(CustomPage, models.CustomPageAdmin)\nse_admin.register(FlatPage, models.FlatPageAdmin)\nse_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n\nse_admin.register(se_models.CategoryPage, CategoryPageAdmin)\nse_admin.register(se_models.ProductPage, ProductPageAdmin)\nse_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\nse_admin.register(se_models.TagGroup, TagGroupAdmin)\nse_admin.register(se_models.Tag, TagAdmin)\n\nse_admin.register(Redirect)\n", "path": "shopelectro/admin.py"}]}
| 2,703 | 388 |
gh_patches_debug_25844
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-578
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ArrayDataSource get_data() returns scalar 0.0 when _data is None
The `ArrayDataSource` is meant to represent a 1D data structure stored in an array. For some reason, when no data value has been set, the `get_data()` method returns a scalar `0.0` value.
It would probably make most sense for the value to be `empty(shape=(0,))` (i.e. a 1D array of length 0).
Changing this may break existing code which expects empty `ArrayDataSources` to return a scalar 0. On the flip side, this will potentially simplify the logic for `get_bounds()` and similar.
</issue>
<code>
[start of chaco/array_data_source.py]
1 """ Defines the ArrayDataSource class."""
2
3 # Major library imports
4 from numpy import array, isfinite, ones, ndarray
5 import numpy as np
6
7 # Enthought library imports
8 from traits.api import Any, Constant, Int, Tuple
9
10 # Chaco imports
11 from .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait
12 from .abstract_data_source import AbstractDataSource
13
14
15 def bounded_nanargmin(arr):
16 """Find the index of the minimum value, ignoring NaNs.
17
18 If all NaNs, return 0.
19 """
20 # Different versions of numpy behave differently in the all-NaN case, so we
21 # catch this condition in two different ways.
22 try:
23 if np.issubdtype(arr.dtype, np.floating):
24 min = np.nanargmin(arr)
25 elif np.issubdtype(arr.dtype, np.number):
26 min = np.argmin(arr)
27 else:
28 min = 0
29 except ValueError:
30 return 0
31 if isfinite(min):
32 return min
33 else:
34 return 0
35
36
37 def bounded_nanargmax(arr):
38 """Find the index of the maximum value, ignoring NaNs.
39
40 If all NaNs, return -1.
41 """
42 try:
43 if np.issubdtype(arr.dtype, np.floating):
44 max = np.nanargmax(arr)
45 elif np.issubdtype(arr.dtype, np.number):
46 max = np.argmax(arr)
47 else:
48 max = -1
49 except ValueError:
50 return -1
51 if isfinite(max):
52 return max
53 else:
54 return -1
55
56
57 class ArrayDataSource(AbstractDataSource):
58 """A data source representing a single, continuous array of numerical data.
59
60 This class does not listen to the array for value changes; if you need that
61 behavior, create a subclass that hooks up the appropriate listeners.
62 """
63
64 # ------------------------------------------------------------------------
65 # AbstractDataSource traits
66 # ------------------------------------------------------------------------
67
68 #: The dimensionality of the indices into this data source (overrides
69 #: AbstractDataSource).
70 index_dimension = Constant("scalar")
71
72 #: The dimensionality of the value at each index point (overrides
73 #: AbstractDataSource).
74 value_dimension = Constant("scalar")
75
76 #: The sort order of the data.
77 #: This is a specialized optimization for 1-D arrays, but it's an important
78 #: one that's used everywhere.
79 sort_order = SortOrderTrait
80
81 # ------------------------------------------------------------------------
82 # Private traits
83 # ------------------------------------------------------------------------
84
85 # The data array itself.
86 _data = NumericalSequenceTrait
87
88 # Cached values of min and max as long as **_data** doesn't change.
89 _cached_bounds = Tuple
90
91 # Not necessary, since this is not a filter, but provided for convenience.
92 _cached_mask = Any
93
94 # The index of the (first) minimum value in self._data
95 # FIXME: This is an Any instead of an Int trait because of how Traits
96 # typechecks numpy.int64 on 64-bit Windows systems.
97 _min_index = Any
98
99 # The index of the (first) maximum value in self._data
100 # FIXME: This is an Any instead of an Int trait because of how Traits
101 # typechecks numpy.int64 on 64-bit Windows systems.
102 _max_index = Any
103
104 # ------------------------------------------------------------------------
105 # Public methods
106 # ------------------------------------------------------------------------
107
108 def __init__(self, data=array([]), sort_order="none", **kw):
109 AbstractDataSource.__init__(self, **kw)
110 self.set_data(data, sort_order)
111
112 def set_data(self, newdata, sort_order=None):
113 """Sets the data, and optionally the sort order, for this data source.
114
115 Parameters
116 ----------
117 newdata : array
118 The data to use.
119 sort_order : SortOrderTrait
120 The sort order of the data
121 """
122 self._data = newdata
123 if sort_order is not None:
124 self.sort_order = sort_order
125 self._compute_bounds()
126 self.data_changed = True
127
128 def set_mask(self, mask):
129 """Sets the mask for this data source."""
130 self._cached_mask = mask
131 self.data_changed = True
132
133 def remove_mask(self):
134 """Removes the mask on this data source."""
135 self._cached_mask = None
136 self.data_changed = True
137
138 # ------------------------------------------------------------------------
139 # AbstractDataSource interface
140 # ------------------------------------------------------------------------
141
142 def get_data(self):
143 """Returns the data for this data source, or 0.0 if it has no data.
144
145 Implements AbstractDataSource.
146 """
147 if self._data is not None:
148 return self._data
149 else:
150 return 0.0
151
152 def get_data_mask(self):
153 """get_data_mask() -> (data_array, mask_array)
154
155 Implements AbstractDataSource.
156 """
157 if self._cached_mask is None:
158 return self._data, ones(len(self._data), dtype=bool)
159 else:
160 return self._data, self._cached_mask
161
162 def is_masked(self):
163 """is_masked() -> bool
164
165 Implements AbstractDataSource.
166 """
167 if self._cached_mask is not None:
168 return True
169 else:
170 return False
171
172 def get_size(self):
173 """get_size() -> int
174
175 Implements AbstractDataSource.
176 """
177 if self._data is not None:
178 return len(self._data)
179 else:
180 return 0
181
182 def get_bounds(self):
183 """Returns the minimum and maximum values of the data source's data.
184
185 Implements AbstractDataSource.
186 """
187 if (
188 self._cached_bounds is None
189 or self._cached_bounds == ()
190 or self._cached_bounds == 0.0
191 ):
192 self._compute_bounds()
193 return self._cached_bounds
194
195 def reverse_map(self, pt, index=0, outside_returns_none=True):
196 """Returns the index of *pt* in the data source.
197
198 Parameters
199 ----------
200 pt : scalar value
201 value to find
202 index
203 ignored for data series with 1-D indices
204 outside_returns_none : Boolean
205 Whether the method returns None if *pt* is outside the range of
206 the data source; if False, the method returns the value of the
207 bound that *pt* is outside of.
208 """
209 if self.sort_order == "none":
210 raise NotImplementedError
211
212 # index is ignored for dataseries with 1-dimensional indices
213 minval, maxval = self._cached_bounds
214 if pt < minval:
215 if outside_returns_none:
216 return None
217 else:
218 return self._min_index
219 elif pt > maxval:
220 if outside_returns_none:
221 return None
222 else:
223 return self._max_index
224 else:
225 return reverse_map_1d(self._data, pt, self.sort_order)
226
227 # ------------------------------------------------------------------------
228 # Private methods
229 # ------------------------------------------------------------------------
230
231 def _compute_bounds(self, data=None):
232 """Computes the minimum and maximum values of self._data.
233
234 If a data array is passed in, then that is used instead of self._data.
235 This behavior is useful for subclasses.
236 """
237 # TODO: as an optimization, perhaps create and cache a sorted
238 # version of the dataset?
239
240 if data is None:
241 # Several sources weren't setting the _data attribute, so we
242 # go through the interface. This seems like the correct thing
243 # to do anyway... right?
244 # data = self._data
245 data = self.get_data()
246
247 data_len = 0
248 try:
249 data_len = len(data)
250 except Exception:
251 pass
252 if data_len == 0:
253 self._min_index = 0
254 self._max_index = 0
255 self._cached_bounds = (0.0, 0.0)
256 elif data_len == 1:
257 self._min_index = 0
258 self._max_index = 0
259 self._cached_bounds = (data[0], data[0])
260 else:
261 if self.sort_order == "ascending":
262 self._min_index = 0
263 self._max_index = -1
264 elif self.sort_order == "descending":
265 self._min_index = -1
266 self._max_index = 0
267 else:
268 # ignore NaN values. This is probably a little slower,
269 # but also much safer.
270
271 # data might be an array of strings or objects that
272 # can't have argmin calculated on them.
273 try:
274 # the data may be in a subclass of numpy.array, viewing
275 # the data as a ndarray will remove side effects of
276 # the subclasses, such as different operator behaviors
277 self._min_index = bounded_nanargmin(data.view(ndarray))
278 self._max_index = bounded_nanargmax(data.view(ndarray))
279 except (TypeError, IndexError, NotImplementedError):
280 # For strings and objects, we punt... These show up in
281 # label-ish data sources.
282 self._cached_bounds = (0.0, 0.0)
283
284 self._cached_bounds = (
285 data[self._min_index],
286 data[self._max_index],
287 )
288
289 # ------------------------------------------------------------------------
290 # Event handlers
291 # ------------------------------------------------------------------------
292
293 def _metadata_changed(self, event):
294 self.metadata_changed = True
295
296 def _metadata_items_changed(self, event):
297 self.metadata_changed = True
298
299 # ------------------------------------------------------------------------
300 # Persistence-related methods
301 # ------------------------------------------------------------------------
302
303 def __getstate__(self):
304 state = self.__dict__.copy()
305 if not self.persist_data:
306 state.pop("_data", None)
307 state.pop("_cached_mask", None)
308 state.pop("_cached_bounds", None)
309 state.pop("_min_index", None)
310 state.pop("_max_index", None)
311 return state
312
313 def _post_load(self):
314 super(ArrayDataSource, self)._post_load()
315 self._cached_bounds = ()
316 self._cached_mask = None
317
[end of chaco/array_data_source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/chaco/array_data_source.py b/chaco/array_data_source.py
--- a/chaco/array_data_source.py
+++ b/chaco/array_data_source.py
@@ -1,7 +1,7 @@
""" Defines the ArrayDataSource class."""
# Major library imports
-from numpy import array, isfinite, ones, ndarray
+from numpy import array, empty, isfinite, ones, ndarray
import numpy as np
# Enthought library imports
@@ -147,7 +147,7 @@
if self._data is not None:
return self._data
else:
- return 0.0
+ return empty(shape=(0,))
def get_data_mask(self):
"""get_data_mask() -> (data_array, mask_array)
@@ -238,17 +238,10 @@
# version of the dataset?
if data is None:
- # Several sources weren't setting the _data attribute, so we
- # go through the interface. This seems like the correct thing
- # to do anyway... right?
- # data = self._data
data = self.get_data()
- data_len = 0
- try:
- data_len = len(data)
- except Exception:
- pass
+ data_len = len(data)
+
if data_len == 0:
self._min_index = 0
self._max_index = 0
|
{"golden_diff": "diff --git a/chaco/array_data_source.py b/chaco/array_data_source.py\n--- a/chaco/array_data_source.py\n+++ b/chaco/array_data_source.py\n@@ -1,7 +1,7 @@\n \"\"\" Defines the ArrayDataSource class.\"\"\"\n \n # Major library imports\n-from numpy import array, isfinite, ones, ndarray\n+from numpy import array, empty, isfinite, ones, ndarray\n import numpy as np\n \n # Enthought library imports\n@@ -147,7 +147,7 @@\n if self._data is not None:\n return self._data\n else:\n- return 0.0\n+ return empty(shape=(0,))\n \n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n@@ -238,17 +238,10 @@\n # version of the dataset?\n \n if data is None:\n- # Several sources weren't setting the _data attribute, so we\n- # go through the interface. This seems like the correct thing\n- # to do anyway... right?\n- # data = self._data\n data = self.get_data()\n \n- data_len = 0\n- try:\n- data_len = len(data)\n- except Exception:\n- pass\n+ data_len = len(data)\n+\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n", "issue": "ArrayDataSource get_data() returns scalar 0.0 when _data is None\nThe `ArrayDataSource` is meant to represent a 1D data structure stored in an array. For some reason, when no data value has been set, the `get_data()` method returns a scalar `0.0` value.\n\nIt would probably make most sense for the value to be `empty(shape=(0,))` (ie. a 1D array of length 0).\n\nChanging this may break existing code which expects empty `ArrayDataSources` to return a scalar 0. On the flip side, this will potentially simplify the logic for `get_bounds()` and similar.\n\n", "before_files": [{"content": "\"\"\" Defines the ArrayDataSource class.\"\"\"\n\n# Major library imports\nfrom numpy import array, isfinite, ones, ndarray\nimport numpy as np\n\n# Enthought library imports\nfrom traits.api import Any, Constant, Int, Tuple\n\n# Chaco imports\nfrom .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait\nfrom .abstract_data_source import AbstractDataSource\n\n\ndef bounded_nanargmin(arr):\n \"\"\"Find the index of the minimum value, ignoring NaNs.\n\n If all NaNs, return 0.\n \"\"\"\n # Different versions of numpy behave differently in the all-NaN case, so we\n # catch this condition in two different ways.\n try:\n if np.issubdtype(arr.dtype, np.floating):\n min = np.nanargmin(arr)\n elif np.issubdtype(arr.dtype, np.number):\n min = np.argmin(arr)\n else:\n min = 0\n except ValueError:\n return 0\n if isfinite(min):\n return min\n else:\n return 0\n\n\ndef bounded_nanargmax(arr):\n \"\"\"Find the index of the maximum value, ignoring NaNs.\n\n If all NaNs, return -1.\n \"\"\"\n try:\n if np.issubdtype(arr.dtype, np.floating):\n max = np.nanargmax(arr)\n elif np.issubdtype(arr.dtype, np.number):\n max = np.argmax(arr)\n else:\n max = -1\n except ValueError:\n return -1\n if isfinite(max):\n return max\n else:\n return -1\n\n\nclass ArrayDataSource(AbstractDataSource):\n \"\"\"A data source representing a single, continuous array of numerical data.\n\n This class does not listen to the array for value changes; if you need that\n behavior, create a subclass that hooks up the appropriate listeners.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # AbstractDataSource traits\n # ------------------------------------------------------------------------\n\n #: The dimensionality of the indices into this data source (overrides\n #: AbstractDataSource).\n index_dimension = Constant(\"scalar\")\n\n #: The 
dimensionality of the value at each index point (overrides\n #: AbstractDataSource).\n value_dimension = Constant(\"scalar\")\n\n #: The sort order of the data.\n #: This is a specialized optimization for 1-D arrays, but it's an important\n #: one that's used everywhere.\n sort_order = SortOrderTrait\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The data array itself.\n _data = NumericalSequenceTrait\n\n # Cached values of min and max as long as **_data** doesn't change.\n _cached_bounds = Tuple\n\n # Not necessary, since this is not a filter, but provided for convenience.\n _cached_mask = Any\n\n # The index of the (first) minimum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _min_index = Any\n\n # The index of the (first) maximum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _max_index = Any\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def __init__(self, data=array([]), sort_order=\"none\", **kw):\n AbstractDataSource.__init__(self, **kw)\n self.set_data(data, sort_order)\n\n def set_data(self, newdata, sort_order=None):\n \"\"\"Sets the data, and optionally the sort order, for this data source.\n\n Parameters\n ----------\n newdata : array\n The data to use.\n sort_order : SortOrderTrait\n The sort order of the data\n \"\"\"\n self._data = newdata\n if sort_order is not None:\n self.sort_order = sort_order\n self._compute_bounds()\n self.data_changed = True\n\n def set_mask(self, mask):\n \"\"\"Sets the mask for this data source.\"\"\"\n self._cached_mask = mask\n self.data_changed = True\n\n def remove_mask(self):\n \"\"\"Removes the mask on this data source.\"\"\"\n self._cached_mask = None\n self.data_changed = True\n\n # ------------------------------------------------------------------------\n # AbstractDataSource interface\n # ------------------------------------------------------------------------\n\n def get_data(self):\n \"\"\"Returns the data for this data source, or 0.0 if it has no data.\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return self._data\n else:\n return 0.0\n\n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n\n def is_masked(self):\n \"\"\"is_masked() -> bool\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is not None:\n return True\n else:\n return False\n\n def get_size(self):\n \"\"\"get_size() -> int\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return len(self._data)\n else:\n return 0\n\n def get_bounds(self):\n \"\"\"Returns the minimum and maximum values of the data source's data.\n\n Implements AbstractDataSource.\n \"\"\"\n if (\n self._cached_bounds is None\n or self._cached_bounds == ()\n or self._cached_bounds == 0.0\n ):\n self._compute_bounds()\n return self._cached_bounds\n\n def reverse_map(self, pt, index=0, outside_returns_none=True):\n \"\"\"Returns the index of *pt* in the data source.\n\n 
Parameters\n ----------\n pt : scalar value\n value to find\n index\n ignored for data series with 1-D indices\n outside_returns_none : Boolean\n Whether the method returns None if *pt* is outside the range of\n the data source; if False, the method returns the value of the\n bound that *pt* is outside of.\n \"\"\"\n if self.sort_order == \"none\":\n raise NotImplementedError\n\n # index is ignored for dataseries with 1-dimensional indices\n minval, maxval = self._cached_bounds\n if pt < minval:\n if outside_returns_none:\n return None\n else:\n return self._min_index\n elif pt > maxval:\n if outside_returns_none:\n return None\n else:\n return self._max_index\n else:\n return reverse_map_1d(self._data, pt, self.sort_order)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n def _compute_bounds(self, data=None):\n \"\"\"Computes the minimum and maximum values of self._data.\n\n If a data array is passed in, then that is used instead of self._data.\n This behavior is useful for subclasses.\n \"\"\"\n # TODO: as an optimization, perhaps create and cache a sorted\n # version of the dataset?\n\n if data is None:\n # Several sources weren't setting the _data attribute, so we\n # go through the interface. This seems like the correct thing\n # to do anyway... right?\n # data = self._data\n data = self.get_data()\n\n data_len = 0\n try:\n data_len = len(data)\n except Exception:\n pass\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (0.0, 0.0)\n elif data_len == 1:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (data[0], data[0])\n else:\n if self.sort_order == \"ascending\":\n self._min_index = 0\n self._max_index = -1\n elif self.sort_order == \"descending\":\n self._min_index = -1\n self._max_index = 0\n else:\n # ignore NaN values. This is probably a little slower,\n # but also much safer.\n\n # data might be an array of strings or objects that\n # can't have argmin calculated on them.\n try:\n # the data may be in a subclass of numpy.array, viewing\n # the data as a ndarray will remove side effects of\n # the subclasses, such as different operator behaviors\n self._min_index = bounded_nanargmin(data.view(ndarray))\n self._max_index = bounded_nanargmax(data.view(ndarray))\n except (TypeError, IndexError, NotImplementedError):\n # For strings and objects, we punt... 
These show up in\n # label-ish data sources.\n self._cached_bounds = (0.0, 0.0)\n\n self._cached_bounds = (\n data[self._min_index],\n data[self._max_index],\n )\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _metadata_changed(self, event):\n self.metadata_changed = True\n\n def _metadata_items_changed(self, event):\n self.metadata_changed = True\n\n # ------------------------------------------------------------------------\n # Persistence-related methods\n # ------------------------------------------------------------------------\n\n def __getstate__(self):\n state = self.__dict__.copy()\n if not self.persist_data:\n state.pop(\"_data\", None)\n state.pop(\"_cached_mask\", None)\n state.pop(\"_cached_bounds\", None)\n state.pop(\"_min_index\", None)\n state.pop(\"_max_index\", None)\n return state\n\n def _post_load(self):\n super(ArrayDataSource, self)._post_load()\n self._cached_bounds = ()\n self._cached_mask = None\n", "path": "chaco/array_data_source.py"}]}
| 3,673 | 320 |
gh_patches_debug_29682
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-990
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add requirement about projection units: should be meters
http://gis.stackexchange.com/questions/85955/how-can-i-obtain-the-unit-of-a-projection-srid
</issue>
<code>
[start of geotrek/common/__init__.py]
1 """
2
3 Geotrek startup script.
4
5 This is executed only once at startup.
6
7 """
8 from south.signals import post_migrate
9 from django.conf import settings
10 from django.db.models.signals import post_syncdb
11
12 from mapentity.helpers import api_bbox
13
14 from geotrek.common.utils.postgresql import load_sql_files
15
16
17 """
18 http://djangosnippets.org/snippets/2311/
19 Ensure South will update our custom SQL during a call to `migrate`.
20 """
21
22 def run_initial_sql_post_migrate(sender, **kwargs):
23 app_label = kwargs.get('app')
24 load_sql_files(app_label)
25
26
27 def run_initial_sql_post_syncdb(sender, **kwargs):
28 app = kwargs.get('app')
29 models_module = app.__name__
30 app_label = models_module.rsplit('.')[-2]
31 load_sql_files(app_label)
32
33
34 if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:
35 post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid="geotrek.core.sqlautoload")
36 # During tests, the signal is received twice unfortunately
37 # https://code.djangoproject.com/ticket/17977
38 else:
39 post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid="geotrek.core.sqlautoload")
40
41
42 """
43 Computed client-side setting.
44 """
45 settings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)
46
[end of geotrek/common/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geotrek/common/__init__.py b/geotrek/common/__init__.py
--- a/geotrek/common/__init__.py
+++ b/geotrek/common/__init__.py
@@ -5,9 +5,11 @@
This is executed only once at startup.
"""
-from south.signals import post_migrate
+from south.signals import pre_migrate, post_migrate
from django.conf import settings
-from django.db.models.signals import post_syncdb
+from django.db import connection
+from django.db.models.signals import pre_syncdb, post_syncdb
+from django.core.exceptions import ImproperlyConfigured
from mapentity.helpers import api_bbox
@@ -31,11 +33,26 @@
load_sql_files(app_label)
+def check_srid_has_meter_unit(sender, **kwargs):
+ if not hasattr(check_srid_has_meter_unit, '_checked'):
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT * FROM spatial_ref_sys
+ WHERE srtext ILIKE '%%meter%%' AND srid=%s;""", [settings.SRID])
+ results = cursor.fetchall()
+ if len(results) == 0:
+ err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID
+ raise ImproperlyConfigured(err_msg)
+ check_srid_has_meter_unit._checked = True
+
+
if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:
+ pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid="geotrek.core.checksrid")
post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid="geotrek.core.sqlautoload")
# During tests, the signal is received twice unfortunately
# https://code.djangoproject.com/ticket/17977
else:
+ pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid="geotrek.core.checksrid")
post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid="geotrek.core.sqlautoload")
|
{"golden_diff": "diff --git a/geotrek/common/__init__.py b/geotrek/common/__init__.py\n--- a/geotrek/common/__init__.py\n+++ b/geotrek/common/__init__.py\n@@ -5,9 +5,11 @@\n This is executed only once at startup.\n \n \"\"\"\n-from south.signals import post_migrate\n+from south.signals import pre_migrate, post_migrate\n from django.conf import settings\n-from django.db.models.signals import post_syncdb\n+from django.db import connection\n+from django.db.models.signals import pre_syncdb, post_syncdb\n+from django.core.exceptions import ImproperlyConfigured\n \n from mapentity.helpers import api_bbox\n \n@@ -31,11 +33,26 @@\n load_sql_files(app_label)\n \n \n+def check_srid_has_meter_unit(sender, **kwargs):\n+ if not hasattr(check_srid_has_meter_unit, '_checked'):\n+ cursor = connection.cursor()\n+ cursor.execute(\"\"\"\n+ SELECT * FROM spatial_ref_sys\n+ WHERE srtext ILIKE '%%meter%%' AND srid=%s;\"\"\", [settings.SRID])\n+ results = cursor.fetchall()\n+ if len(results) == 0:\n+ err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID\n+ raise ImproperlyConfigured(err_msg)\n+ check_srid_has_meter_unit._checked = True\n+\n+\n if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n+ pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\n else:\n+ pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n", "issue": "Add requirement about projection units : should be meters\nhttp://gis.stackexchange.com/questions/85955/how-can-i-obtain-the-unit-of-a-projection-srid\n\n", "before_files": [{"content": "\"\"\"\n\n Geotrek startup script.\n\n This is executed only once at startup.\n\n\"\"\"\nfrom south.signals import post_migrate\nfrom django.conf import settings\nfrom django.db.models.signals import post_syncdb\n\nfrom mapentity.helpers import api_bbox\n\nfrom geotrek.common.utils.postgresql import load_sql_files\n\n\n\"\"\"\n http://djangosnippets.org/snippets/2311/\n Ensure South will update our custom SQL during a call to `migrate`.\n\"\"\"\n\ndef run_initial_sql_post_migrate(sender, **kwargs):\n app_label = kwargs.get('app')\n load_sql_files(app_label)\n\n\ndef run_initial_sql_post_syncdb(sender, **kwargs):\n app = kwargs.get('app')\n models_module = app.__name__\n app_label = models_module.rsplit('.')[-2]\n load_sql_files(app_label)\n\n\nif settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\nelse:\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n\n\n\"\"\"\n Computed client-side setting.\n\"\"\"\nsettings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)\n", "path": "geotrek/common/__init__.py"}]}
| 972 | 442 |
gh_patches_debug_8892
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-2788
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The captcha does not work under Python 3
@morxa [reported](https://github.com/fedora-infra/bodhi/issues/2700#issuecomment-441245759) that the captcha was not working in Fedora's Bodhi deployment:

It turns out that it does work if Bodhi is run with Python 2, but fails under Python 3. My guess is that there is a ```str```/```bytes``` mismatch somewhere that we need to find and fix.
</issue>
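The mismatch is reproducible outside Bodhi: on Python 3, `base64.urlsafe_b64encode` returns `bytes`, so the cipherkey placed in the session and in the captcha URL is no longer a `str`. A minimal sketch assuming only the `cryptography` package (the trailing `.decode()` is the kind of one-line change that resolves it):

```python
import base64
import cryptography.fernet

secret = cryptography.fernet.Fernet.generate_key()
engine = cryptography.fernet.Fernet(secret)

# Mirrors encrypt() in bodhi/server/captcha.py shown below.
ciphertext = base64.urlsafe_b64encode(engine.encrypt("7 + 5 =".encode("utf-8")))
print(type(ciphertext))                 # <class 'bytes'> on Python 3
cipherkey = ciphertext.decode("utf8")   # decode once so the URL and session get str
```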
<code>
[start of bodhi/server/captcha.py]
1 # -*- coding: utf-8 -*-
2 # Copyright © 2014-2018 Red Hat, Inc.
3 #
4 # This file is part of Bodhi.
5 #
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20 #
21 # Authors: Ralph Bean <[email protected]>
22 """Define utilities and a view pertaining to captcha images for unauthenticated users."""
23
24 from __future__ import division
25 import base64
26 import math
27 import random
28 import binascii
29
30 from PIL import Image, ImageDraw, ImageFont
31 from pyramid.httpexceptions import HTTPGone, HTTPNotFound
32 from pyramid.view import view_config
33 import cryptography.fernet
34 import six
35
36
37 def math_generator(plainkey, settings):
38 """
39 Given a plainkey, return its expected value.
40
41 Args:
42 plainkey (basestring or None): The key to encrypt as ciphertext. If None, a random key is
43 generated.
44 settings (bodhi.server.config.BodhiConfig): Bodhi's settings, unsued.
45 Returns:
46 tuple: A 2-tuple is returned. The first element is the plainkey, and the second is its
47 encrypted value.
48 Raises:
49 ValueError: If the plainkey is not valid.
50 """
51 if not plainkey:
52 x = random.randint(1, 100)
53 y = random.randint(1, 100)
54 plainkey = "%i + %i =" % (x, y)
55
56 tokens = plainkey.split()
57 if not len(tokens) == 4:
58 raise ValueError("%s is an invalid plainkey" % plainkey)
59
60 if tokens[1] != '+' or tokens[3] != '=':
61 raise ValueError("%s is an invalid plainkey" % plainkey)
62
63 x, y = int(tokens[0]), int(tokens[2])
64
65 value = six.text_type(x + y)
66 return plainkey, value
67
68
69 def jpeg_generator(plainkey, settings):
70 """
71 Generate an image with plainkey written in it.
72
73 Args:
74 plainkey (basestring): The text to include in the generated image.
75 settings (bodhi.server.config.BodhiConfig): Bodhi's settings.
76 Returns:
77 PIL.Image.Image: An image containing the given text.
78 """
79 image_size = image_width, image_height = (
80 settings.get('captcha.image_width'),
81 settings.get('captcha.image_height'),
82 )
83 font_path = settings.get('captcha.font_path')
84 font_size = settings.get('captcha.font_size')
85 font_color = settings.get('captcha.font_color')
86 background_color = settings.get('captcha.background_color')
87 padding = settings.get('captcha.padding')
88
89 img = Image.new('RGB', image_size, color=background_color)
90
91 font = ImageFont.truetype(font_path, font_size)
92 width, height = font.getsize(plainkey)
93
94 draw = ImageDraw.Draw(img)
95 position = (
96 random.randint(padding, (image_width - width - padding)),
97 random.randint(padding, (image_height - height - padding)))
98 draw.text(position, plainkey, font=font, fill=font_color)
99
100 # Make it crazy!
101 img = warp_image(img)
102
103 return img
104
105
106 def warp_image(image):
107 """
108 Apply some random bending operations to the given image.
109
110 This function attempts to make it harder for bots to read the text inside the image, while
111 allowing humans to read it.
112
113 Args:
114 image (PIL.Image.Image): The image to warp.
115 Returns:
116 PIL.Image.Image: A warped transformation of the given image.
117 """
118 r = 10 # individually warp a bunch of 10x10 tiles.
119 mesh_x = (image.size[0] // r) + 2
120 mesh_y = (image.size[1] // r) + 2
121
122 # Set up some random values we'll use over and over...
123 amplitude = random.uniform(6, 10)
124 period = random.uniform(0.65, 0.74)
125 offset = (
126 random.uniform(0, math.pi * 2 / period),
127 random.uniform(0, math.pi * 2 / period),
128 )
129
130 def _sine(x, y, a=amplitude, p=period, o=offset):
131 """Given a single point, warp it."""
132 return (
133 math.sin((y + o[0]) * p) * a + x,
134 math.sin((x + o[1]) * p) * a + y,
135 )
136
137 def _clamp(x, y):
138 """Don't warp things outside the bounds of the image."""
139 return (
140 max(0, min(image.size[0] - 1, x)),
141 max(0, min(image.size[1] - 1, y)),
142 )
143
144 # Build a map of the corners of our r by r tiles, warping each one.
145 warp = [
146 [
147 _clamp(*_sine(i * r, j * r))
148 for j in range(mesh_y)
149 ] for i in range(mesh_x)
150 ]
151
152 def _destination_rectangle(i, j):
153 """Return a happy tile from the original space."""
154 return (i * r, j * r, (i + 1) * r, (j + 1) * r)
155
156 def _source_quadrilateral(i, j):
157 """
158 Return the set of warped corners for a given tile.
159
160 Specified counter-clockwise as a tuple.
161 """
162 return (
163 warp[i][j][0], warp[i][j][1],
164 warp[i][j + 1][0], warp[i][j + 1][1],
165 warp[i + 1][j + 1][0], warp[i + 1][j + 1][1],
166 warp[i + 1][j][0], warp[i + 1][j][1],
167 )
168
169 # Finally, prepare our list of sources->destinations for PIL.
170 mesh = [
171 (
172 _destination_rectangle(i, j),
173 _source_quadrilateral(i, j),
174 )
175 for j in range(mesh_y - 1)
176 for i in range(mesh_x - 1)
177 ]
178 # And do it.
179 return image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)
180
181
182 def validate(request, cipherkey, value):
183 """
184 Return whether the value matches the expected value, based on the cipherkey.
185
186 Args:
187 request (pyramid.util.Request): The current web request.
188 cipherkey (basestring): The encrypted Fernet key.
189 value (basestring): The value to be validated.
190 Returns:
191 bool: True if value matches the expected value based on the cipherkey, False otherwise.
192 False is also returned if the cipherkey is not found to be a valid Fernet token.
193 """
194 settings = request.registry.settings
195
196 plainkey = decrypt(cipherkey, settings)
197
198 _, expected_value = math_generator(plainkey=plainkey, settings=settings)
199 return value == expected_value
200
201
202 def generate_captcha(context, request):
203 """
204 Generate a key and a URL to a captcha image that matches the key.
205
206 Args:
207 context (mako.runtime.Context): Unused.
208 request (pyramid.util.Request): The current web request.
209 Returns:
210 tuple: A 2-tuple of strings. The first is the ciphertext key for a captcha, and the second
211 is a URL to the captcha image that matches that key.
212 """
213 settings = request.registry.settings
214 plainkey, value = math_generator(plainkey=None, settings=settings)
215 cipherkey = encrypt(plainkey, settings)
216 url = request.route_url('captcha_image', cipherkey=cipherkey)
217 request.session['captcha'] = cipherkey # Remember this to stop replay.
218 return cipherkey, url
219
220
221 def encrypt(plaintext, settings):
222 """
223 Calculate and return the ciphertext key from the given plaintext key.
224
225 Args:
226 plaintext (basestring): A key you wish you encrypt.
227 settings (bodhi.server.config.BodhiConfig): Bodhi's settings.
228 Returns:
229 str: The ciphertext version of the given captcha key.
230 """
231 secret = settings['captcha.secret']
232 engine = cryptography.fernet.Fernet(secret)
233 ciphertext = engine.encrypt(plaintext.encode('utf-8'))
234 ciphertext = base64.urlsafe_b64encode(ciphertext)
235 return ciphertext
236
237
238 def decrypt(ciphertext, settings):
239 """
240 Calculate and return the plaintext key from the given ciphertext.
241
242 Args:
243 ciphertext (str): The encrypted secret for a captcha image.
244 settings (bodhi.server.config.BodhiConfig): Bodhi's settings.
245 Returns:
246 unicode: The plaintext secret for a captcha image.
247 Raises:
248 pyramid.httpexceptions.HTTPNotFound: If the ciphertext can not be decoded as base64.
249 pyramid.httpexceptions.HTTPGone: If the captcha token has expired.
250 """
251 ttl = settings['captcha.ttl']
252 secret = settings['captcha.secret']
253 engine = cryptography.fernet.Fernet(secret)
254
255 if isinstance(ciphertext, six.text_type):
256 ciphertext = ciphertext.encode('utf-8')
257
258 try:
259 ciphertext = base64.urlsafe_b64decode(ciphertext)
260 except (TypeError, binascii.Error):
261 raise HTTPNotFound("%s is garbage" % ciphertext.decode('utf-8'))
262
263 try:
264 plaintext = engine.decrypt(ciphertext, ttl=ttl)
265 except cryptography.fernet.InvalidToken:
266 raise HTTPGone('captcha token is no longer valid')
267
268 return plaintext.decode('utf-8')
269
270
271 @view_config(route_name='captcha_image', renderer='jpeg')
272 def captcha_image(request):
273 """
274 Generate and return a captcha image.
275
276 Args:
277 request (pyramid.util.Request): The current web request.
278 Returns:
279 PIL.Image.Image: The generated captcha image.
280 """
281 cipherkey = request.matchdict['cipherkey']
282 plainkey = decrypt(cipherkey, request.registry.settings)
283 image = jpeg_generator(plainkey, request.registry.settings)
284 return image
285
[end of bodhi/server/captcha.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bodhi/server/captcha.py b/bodhi/server/captcha.py
--- a/bodhi/server/captcha.py
+++ b/bodhi/server/captcha.py
@@ -212,7 +212,7 @@
"""
settings = request.registry.settings
plainkey, value = math_generator(plainkey=None, settings=settings)
- cipherkey = encrypt(plainkey, settings)
+ cipherkey = encrypt(plainkey, settings).decode('utf8')
url = request.route_url('captcha_image', cipherkey=cipherkey)
request.session['captcha'] = cipherkey # Remember this to stop replay.
return cipherkey, url
|
{"golden_diff": "diff --git a/bodhi/server/captcha.py b/bodhi/server/captcha.py\n--- a/bodhi/server/captcha.py\n+++ b/bodhi/server/captcha.py\n@@ -212,7 +212,7 @@\n \"\"\"\n settings = request.registry.settings\n plainkey, value = math_generator(plainkey=None, settings=settings)\n- cipherkey = encrypt(plainkey, settings)\n+ cipherkey = encrypt(plainkey, settings).decode('utf8')\n url = request.route_url('captcha_image', cipherkey=cipherkey)\n request.session['captcha'] = cipherkey # Remember this to stop replay.\n return cipherkey, url\n", "issue": "The captcha does not work under Python 3\n@morxa [reported](https://github.com/fedora-infra/bodhi/issues/2700#issuecomment-441245759) that the captcha was not working in Fedora's Bodhi deployment:\r\n\r\n\r\n\r\n\r\nIt turns out that it does work if Bodhi is run with Python 2, but fails under Python 3. My guess is that there is a ```str```/```bytes``` mismatch somewhere that we need to find and fix.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2014-2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA.\n#\n# Authors: Ralph Bean <[email protected]>\n\"\"\"Define utilities and a view pertaining to captcha images for unauthenticated users.\"\"\"\n\nfrom __future__ import division\nimport base64\nimport math\nimport random\nimport binascii\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom pyramid.httpexceptions import HTTPGone, HTTPNotFound\nfrom pyramid.view import view_config\nimport cryptography.fernet\nimport six\n\n\ndef math_generator(plainkey, settings):\n \"\"\"\n Given a plainkey, return its expected value.\n\n Args:\n plainkey (basestring or None): The key to encrypt as ciphertext. If None, a random key is\n generated.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings, unsued.\n Returns:\n tuple: A 2-tuple is returned. 
The first element is the plainkey, and the second is its\n encrypted value.\n Raises:\n ValueError: If the plainkey is not valid.\n \"\"\"\n if not plainkey:\n x = random.randint(1, 100)\n y = random.randint(1, 100)\n plainkey = \"%i + %i =\" % (x, y)\n\n tokens = plainkey.split()\n if not len(tokens) == 4:\n raise ValueError(\"%s is an invalid plainkey\" % plainkey)\n\n if tokens[1] != '+' or tokens[3] != '=':\n raise ValueError(\"%s is an invalid plainkey\" % plainkey)\n\n x, y = int(tokens[0]), int(tokens[2])\n\n value = six.text_type(x + y)\n return plainkey, value\n\n\ndef jpeg_generator(plainkey, settings):\n \"\"\"\n Generate an image with plainkey written in it.\n\n Args:\n plainkey (basestring): The text to include in the generated image.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n PIL.Image.Image: An image containing the given text.\n \"\"\"\n image_size = image_width, image_height = (\n settings.get('captcha.image_width'),\n settings.get('captcha.image_height'),\n )\n font_path = settings.get('captcha.font_path')\n font_size = settings.get('captcha.font_size')\n font_color = settings.get('captcha.font_color')\n background_color = settings.get('captcha.background_color')\n padding = settings.get('captcha.padding')\n\n img = Image.new('RGB', image_size, color=background_color)\n\n font = ImageFont.truetype(font_path, font_size)\n width, height = font.getsize(plainkey)\n\n draw = ImageDraw.Draw(img)\n position = (\n random.randint(padding, (image_width - width - padding)),\n random.randint(padding, (image_height - height - padding)))\n draw.text(position, plainkey, font=font, fill=font_color)\n\n # Make it crazy!\n img = warp_image(img)\n\n return img\n\n\ndef warp_image(image):\n \"\"\"\n Apply some random bending operations to the given image.\n\n This function attempts to make it harder for bots to read the text inside the image, while\n allowing humans to read it.\n\n Args:\n image (PIL.Image.Image): The image to warp.\n Returns:\n PIL.Image.Image: A warped transformation of the given image.\n \"\"\"\n r = 10 # individually warp a bunch of 10x10 tiles.\n mesh_x = (image.size[0] // r) + 2\n mesh_y = (image.size[1] // r) + 2\n\n # Set up some random values we'll use over and over...\n amplitude = random.uniform(6, 10)\n period = random.uniform(0.65, 0.74)\n offset = (\n random.uniform(0, math.pi * 2 / period),\n random.uniform(0, math.pi * 2 / period),\n )\n\n def _sine(x, y, a=amplitude, p=period, o=offset):\n \"\"\"Given a single point, warp it.\"\"\"\n return (\n math.sin((y + o[0]) * p) * a + x,\n math.sin((x + o[1]) * p) * a + y,\n )\n\n def _clamp(x, y):\n \"\"\"Don't warp things outside the bounds of the image.\"\"\"\n return (\n max(0, min(image.size[0] - 1, x)),\n max(0, min(image.size[1] - 1, y)),\n )\n\n # Build a map of the corners of our r by r tiles, warping each one.\n warp = [\n [\n _clamp(*_sine(i * r, j * r))\n for j in range(mesh_y)\n ] for i in range(mesh_x)\n ]\n\n def _destination_rectangle(i, j):\n \"\"\"Return a happy tile from the original space.\"\"\"\n return (i * r, j * r, (i + 1) * r, (j + 1) * r)\n\n def _source_quadrilateral(i, j):\n \"\"\"\n Return the set of warped corners for a given tile.\n\n Specified counter-clockwise as a tuple.\n \"\"\"\n return (\n warp[i][j][0], warp[i][j][1],\n warp[i][j + 1][0], warp[i][j + 1][1],\n warp[i + 1][j + 1][0], warp[i + 1][j + 1][1],\n warp[i + 1][j][0], warp[i + 1][j][1],\n )\n\n # Finally, prepare our list of sources->destinations for PIL.\n mesh = [\n (\n 
_destination_rectangle(i, j),\n _source_quadrilateral(i, j),\n )\n for j in range(mesh_y - 1)\n for i in range(mesh_x - 1)\n ]\n # And do it.\n return image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)\n\n\ndef validate(request, cipherkey, value):\n \"\"\"\n Return whether the value matches the expected value, based on the cipherkey.\n\n Args:\n request (pyramid.util.Request): The current web request.\n cipherkey (basestring): The encrypted Fernet key.\n value (basestring): The value to be validated.\n Returns:\n bool: True if value matches the expected value based on the cipherkey, False otherwise.\n False is also returned if the cipherkey is not found to be a valid Fernet token.\n \"\"\"\n settings = request.registry.settings\n\n plainkey = decrypt(cipherkey, settings)\n\n _, expected_value = math_generator(plainkey=plainkey, settings=settings)\n return value == expected_value\n\n\ndef generate_captcha(context, request):\n \"\"\"\n Generate a key and a URL to a captcha image that matches the key.\n\n Args:\n context (mako.runtime.Context): Unused.\n request (pyramid.util.Request): The current web request.\n Returns:\n tuple: A 2-tuple of strings. The first is the ciphertext key for a captcha, and the second\n is a URL to the captcha image that matches that key.\n \"\"\"\n settings = request.registry.settings\n plainkey, value = math_generator(plainkey=None, settings=settings)\n cipherkey = encrypt(plainkey, settings)\n url = request.route_url('captcha_image', cipherkey=cipherkey)\n request.session['captcha'] = cipherkey # Remember this to stop replay.\n return cipherkey, url\n\n\ndef encrypt(plaintext, settings):\n \"\"\"\n Calculate and return the ciphertext key from the given plaintext key.\n\n Args:\n plaintext (basestring): A key you wish you encrypt.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n str: The ciphertext version of the given captcha key.\n \"\"\"\n secret = settings['captcha.secret']\n engine = cryptography.fernet.Fernet(secret)\n ciphertext = engine.encrypt(plaintext.encode('utf-8'))\n ciphertext = base64.urlsafe_b64encode(ciphertext)\n return ciphertext\n\n\ndef decrypt(ciphertext, settings):\n \"\"\"\n Calculate and return the plaintext key from the given ciphertext.\n\n Args:\n ciphertext (str): The encrypted secret for a captcha image.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n unicode: The plaintext secret for a captcha image.\n Raises:\n pyramid.httpexceptions.HTTPNotFound: If the ciphertext can not be decoded as base64.\n pyramid.httpexceptions.HTTPGone: If the captcha token has expired.\n \"\"\"\n ttl = settings['captcha.ttl']\n secret = settings['captcha.secret']\n engine = cryptography.fernet.Fernet(secret)\n\n if isinstance(ciphertext, six.text_type):\n ciphertext = ciphertext.encode('utf-8')\n\n try:\n ciphertext = base64.urlsafe_b64decode(ciphertext)\n except (TypeError, binascii.Error):\n raise HTTPNotFound(\"%s is garbage\" % ciphertext.decode('utf-8'))\n\n try:\n plaintext = engine.decrypt(ciphertext, ttl=ttl)\n except cryptography.fernet.InvalidToken:\n raise HTTPGone('captcha token is no longer valid')\n\n return plaintext.decode('utf-8')\n\n\n@view_config(route_name='captcha_image', renderer='jpeg')\ndef captcha_image(request):\n \"\"\"\n Generate and return a captcha image.\n\n Args:\n request (pyramid.util.Request): The current web request.\n Returns:\n PIL.Image.Image: The generated captcha image.\n \"\"\"\n cipherkey = request.matchdict['cipherkey']\n plainkey = 
decrypt(cipherkey, request.registry.settings)\n image = jpeg_generator(plainkey, request.registry.settings)\n return image\n", "path": "bodhi/server/captcha.py"}]}
| 3,822 | 148 |
gh_patches_debug_47980
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-564
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary Loop
https://github.com/TheAlgorithms/Python/blob/4e0184a41dd3e4838da484057d25e17234353da0/dynamic_programming/matrix_chain_order.py#L12-L15
`Line 12` creates an NxN matrix that **contains 0**, so there is no need for `Lines 14-15`.
</issue>
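This is easy to confirm: the list comprehension on line 12 already fills every cell, including the diagonal, with 0, so the loop on lines 14-15 only rewrites values that are already zero. A quick check:

```python
N = 5
Matrix = [[0 for x in range(N)] for x in range(N)]
# The diagonal is already zero before the extra loop ever runs.
assert all(Matrix[i][i] == 0 for i in range(1, N))
```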
<code>
[start of dynamic_programming/matrix_chain_order.py]
1 from __future__ import print_function
2
3 import sys
4 '''
5 Dynamic Programming
6 Implementation of Matrix Chain Multiplication
7 Time Complexity: O(n^3)
8 Space Complexity: O(n^2)
9 '''
10 def MatrixChainOrder(array):
11 N=len(array)
12 Matrix=[[0 for x in range(N)] for x in range(N)]
13 Sol=[[0 for x in range(N)] for x in range(N)]
14 for i in range(1,N):
15 Matrix[i][i]=0
16
17 for ChainLength in range(2,N):
18 for a in range(1,N-ChainLength+1):
19 b = a+ChainLength-1
20
21 Matrix[a][b] = sys.maxsize
22 for c in range(a , b):
23 cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]
24 if cost < Matrix[a][b]:
25 Matrix[a][b] = cost
26 Sol[a][b] = c
27 return Matrix , Sol
28 #Print order of matrix with Ai as Matrix
29 def PrintOptimalSolution(OptimalSolution,i,j):
30 if i==j:
31 print("A" + str(i),end = " ")
32 else:
33 print("(",end = " ")
34 PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])
35 PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)
36 print(")",end = " ")
37
38 def main():
39 array=[30,35,15,5,10,20,25]
40 n=len(array)
41 #Size of matrix created from above array will be
42 # 30*35 35*15 15*5 5*10 10*20 20*25
43 Matrix , OptimalSolution = MatrixChainOrder(array)
44
45 print("No. of Operation required: "+str((Matrix[1][n-1])))
46 PrintOptimalSolution(OptimalSolution,1,n-1)
47 if __name__ == '__main__':
48 main()
49
[end of dynamic_programming/matrix_chain_order.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py
--- a/dynamic_programming/matrix_chain_order.py
+++ b/dynamic_programming/matrix_chain_order.py
@@ -11,8 +11,6 @@
N=len(array)
Matrix=[[0 for x in range(N)] for x in range(N)]
Sol=[[0 for x in range(N)] for x in range(N)]
- for i in range(1,N):
- Matrix[i][i]=0
for ChainLength in range(2,N):
for a in range(1,N-ChainLength+1):
|
{"golden_diff": "diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py\n--- a/dynamic_programming/matrix_chain_order.py\n+++ b/dynamic_programming/matrix_chain_order.py\n@@ -11,8 +11,6 @@\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n- for i in range(1,N):\n- Matrix[i][i]=0\n \n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n", "issue": "Unnecessary Loop \nhttps://github.com/TheAlgorithms/Python/blob/4e0184a41dd3e4838da484057d25e17234353da0/dynamic_programming/matrix_chain_order.py#L12-L15\r\n\r\n`Line 12` creates a NxN Matrix that **contains 0**. So there is no need for `Line 14-15`\n", "before_files": [{"content": "from __future__ import print_function\n\nimport sys\n'''\nDynamic Programming\nImplementation of Matrix Chain Multiplication\nTime Complexity: O(n^3)\nSpace Complexity: O(n^2)\n'''\ndef MatrixChainOrder(array):\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n for i in range(1,N):\n Matrix[i][i]=0\n\n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n b = a+ChainLength-1\n\n Matrix[a][b] = sys.maxsize\n for c in range(a , b):\n cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]\n if cost < Matrix[a][b]:\n Matrix[a][b] = cost\n Sol[a][b] = c\n return Matrix , Sol\n#Print order of matrix with Ai as Matrix\ndef PrintOptimalSolution(OptimalSolution,i,j):\n if i==j:\n print(\"A\" + str(i),end = \" \")\n else:\n print(\"(\",end = \" \")\n PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])\n PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)\n print(\")\",end = \" \")\n\ndef main():\n array=[30,35,15,5,10,20,25]\n n=len(array)\n #Size of matrix created from above array will be\n # 30*35 35*15 15*5 5*10 10*20 20*25\n Matrix , OptimalSolution = MatrixChainOrder(array)\n\n print(\"No. of Operation required: \"+str((Matrix[1][n-1])))\n PrintOptimalSolution(OptimalSolution,1,n-1)\nif __name__ == '__main__':\n main()\n", "path": "dynamic_programming/matrix_chain_order.py"}]}
| 1,187 | 140 |
gh_patches_debug_19547
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1619
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FastAPI/Starlette: Truncate exception message to prevent "event exceeded the permitted size" error from APM Server
**Describe the bug**:
The capture_exception() method sends the whole exception message to the APM Server, and when the message exceeds the maximum size, the APM Server returns the "event exceeded the permitted size" error.
I'm sorry if it is not a bug, but I believe that the apm-agent should truncate the message before sending it to the APM Server.
**To Reproduce**
Create a FastAPI route that always raises an error with a large exception message (as in the example below), and it will show an "event exceeded the permitted size" error on the next attempt to send events to the APM server
```
@app.get('/apm_test/')
async def apm_test():
try:
raise Exception('test' * 1000000)
except Exception as e:
apm_client.capture_exception()
return PlainTextResponse('', 200)
```
**Environment**
- OS: ubuntu 20.04
- Python version: 3.9.13
- Framework and version: fastapi==0.79.0
- APM Server version: 8.3.0
- Agent version: 6.11.0
</issue>
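What is being asked for is essentially a length cap applied to the exception message before the event is queued. A generic sketch of such a cap; the helper name and the 1024-character limit are illustrative assumptions, not the agent's actual constants:

```python
def truncate_message(message, max_length=1024, suffix="..."):
    # Keep the start of the message and mark the cut so the event stays small.
    if len(message) <= max_length:
        return message
    return message[: max_length - len(suffix)] + suffix

truncate_message("test" * 1000000)  # -> a 1024-character string ending in "..."
```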
<code>
[start of elasticapm/events.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 import random
33 import sys
34
35 from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH
36 from elasticapm.utils import varmap
37 from elasticapm.utils.encoding import keyword_field, shorten, to_unicode
38 from elasticapm.utils.logging import get_logger
39 from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames
40
41 __all__ = ("BaseEvent", "Exception", "Message")
42
43 logger = get_logger("elasticapm.events")
44
45
46 class BaseEvent(object):
47 @staticmethod
48 def to_string(client, data):
49 raise NotImplementedError
50
51 @staticmethod
52 def capture(client, **kwargs):
53 return {}
54
55
56 class Exception(BaseEvent):
57 """
58 Exceptions store the following metadata:
59
60 - value: 'My exception value'
61 - type: 'ClassName'
62 - module '__builtin__' (i.e. __builtin__.TypeError)
63 - frames: a list of serialized frames (see _get_traceback_frames)
64 """
65
66 @staticmethod
67 def to_string(client, data):
68 exc = data["exception"]
69 if exc["value"]:
70 return "%s: %s" % (exc["type"], exc["value"])
71 return exc["type"]
72
73 @staticmethod
74 def get_hash(data):
75 exc = data["exception"]
76 output = [exc["type"]]
77 for frame in data["stacktrace"]["frames"]:
78 output.append(frame["module"])
79 output.append(frame["function"])
80 return output
81
82 @staticmethod
83 def capture(client, exc_info=None, **kwargs):
84 culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None
85 new_exc_info = False
86 if not exc_info or exc_info is True:
87 new_exc_info = True
88 exc_info = sys.exc_info()
89
90 if exc_info == (None, None, None):
91 raise ValueError("No exception found: capture_exception requires an active exception.")
92
93 try:
94 exc_type, exc_value, exc_traceback = exc_info
95
96 frames = get_stack_info(
97 iter_traceback_frames(exc_traceback, config=client.config),
98 with_locals=client.config.collect_local_variables in ("errors", "all"),
99 library_frame_context_lines=client.config.source_lines_error_library_frames,
100 in_app_frame_context_lines=client.config.source_lines_error_app_frames,
101 include_paths_re=client.include_paths_re,
102 exclude_paths_re=client.exclude_paths_re,
103 locals_processor_func=lambda local_var: varmap(
104 lambda k, val: shorten(
105 val,
106 list_length=client.config.local_var_list_max_length,
107 string_length=client.config.local_var_max_length,
108 dict_length=client.config.local_var_dict_max_length,
109 ),
110 local_var,
111 ),
112 )
113
114 culprit = kwargs.get("culprit", None) or get_culprit(
115 frames, client.config.include_paths, client.config.exclude_paths
116 )
117
118 if hasattr(exc_type, "__module__"):
119 exc_module = exc_type.__module__
120 exc_type = exc_type.__name__
121 else:
122 exc_module = None
123 exc_type = exc_type.__name__
124 finally:
125 if new_exc_info:
126 try:
127 del exc_info
128 del exc_traceback
129 except Exception as e:
130 logger.exception(e)
131 if "message" in kwargs:
132 message = kwargs["message"]
133 else:
134 message = "%s: %s" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)
135
136 data = {
137 "id": "%032x" % random.getrandbits(128),
138 "culprit": keyword_field(culprit),
139 "exception": {
140 "message": message,
141 "type": keyword_field(str(exc_type)),
142 "module": keyword_field(str(exc_module)),
143 "stacktrace": frames,
144 },
145 }
146 if hasattr(exc_value, "_elastic_apm_span_id"):
147 data["parent_id"] = exc_value._elastic_apm_span_id
148 del exc_value._elastic_apm_span_id
149 depth = kwargs.get("_exc_chain_depth", 0)
150 if depth > EXCEPTION_CHAIN_MAX_DEPTH:
151 return
152 cause = exc_value.__cause__
153 chained_context = exc_value.__context__
154
155 # we follow the pattern of Python itself here and only capture the chained exception
156 # if cause is not None and __suppress_context__ is False
157 if chained_context and not (exc_value.__suppress_context__ and cause is None):
158 if cause:
159 chained_exc_type = type(cause)
160 chained_exc_value = cause
161 else:
162 chained_exc_type = type(chained_context)
163 chained_exc_value = chained_context
164 chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__
165
166 chained_cause = Exception.capture(
167 client, exc_info=chained_exc_info, culprit="None", _exc_chain_depth=depth + 1
168 )
169 if chained_cause:
170 data["exception"]["cause"] = [chained_cause["exception"]]
171 return data
172
173
174 class Message(BaseEvent):
175 """
176 Messages store the following metadata:
177
178 - message: 'My message from %s about %s'
179 - params: ('foo', 'bar')
180 """
181
182 @staticmethod
183 def to_string(client, data):
184 return data["log"]["message"]
185
186 @staticmethod
187 def get_hash(data):
188 msg = data["param_message"]
189 return [msg["message"]]
190
191 @staticmethod
192 def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):
193 if message:
194 param_message = {"message": message}
195 params = param_message.get("params")
196 message = param_message["message"] % params if params else param_message["message"]
197 data = kwargs.get("data", {})
198 message_data = {
199 "id": "%032x" % random.getrandbits(128),
200 "log": {
201 "level": keyword_field(level or "error"),
202 "logger_name": keyword_field(logger_name or "__root__"),
203 "message": message,
204 "param_message": keyword_field(param_message["message"]),
205 },
206 }
207 if isinstance(data.get("stacktrace"), dict):
208 message_data["log"]["stacktrace"] = data["stacktrace"]["frames"]
209 if kwargs.get("exception"):
210 message_data["culprit"] = kwargs["exception"]["culprit"]
211 message_data["exception"] = kwargs["exception"]["exception"]
212 return message_data
213
[end of elasticapm/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/events.py b/elasticapm/events.py
--- a/elasticapm/events.py
+++ b/elasticapm/events.py
@@ -33,7 +33,7 @@
import sys
from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH
-from elasticapm.utils import varmap
+from elasticapm.utils import encoding, varmap
from elasticapm.utils.encoding import keyword_field, shorten, to_unicode
from elasticapm.utils.logging import get_logger
from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames
@@ -133,6 +133,8 @@
else:
message = "%s: %s" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)
+ message = encoding.long_field(message)
+
data = {
"id": "%032x" % random.getrandbits(128),
"culprit": keyword_field(culprit),
|
{"golden_diff": "diff --git a/elasticapm/events.py b/elasticapm/events.py\n--- a/elasticapm/events.py\n+++ b/elasticapm/events.py\n@@ -33,7 +33,7 @@\n import sys\n \n from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH\n-from elasticapm.utils import varmap\n+from elasticapm.utils import encoding, varmap\n from elasticapm.utils.encoding import keyword_field, shorten, to_unicode\n from elasticapm.utils.logging import get_logger\n from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n@@ -133,6 +133,8 @@\n else:\n message = \"%s: %s\" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)\n \n+ message = encoding.long_field(message)\n+\n data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": keyword_field(culprit),\n", "issue": "FastAPI/Starlette: Truncate exception message to prevent \"event exceeded the permitted size\" error from APM Server\n**Describe the bug**: \r\nThe capture_exception() method is sending the whole exception message to the APM Server and when the message exceeds the maximum size, the APM Server returns the \"event exceeded the permitted size\" error. \r\n\r\nI'm sorry if it is not a bug, but I believe that the apm-agent should truncate the message before sending it to the APM Server.\r\n\r\n\r\n**To Reproduce**\r\nCreate a FastAPI route that always raise an error with a large exception message (as the example below) and it will show an \"event exceeded the permitted size\" error on the next attempt to send events to APM server\r\n\r\n```\r\[email protected]('/apm_test/')\r\nasync def apm_test():\r\n try:\r\n raise Exception('test' * 1000000)\r\n except Exception as e:\r\n apm_client.capture_exception()\r\n return PlainTextResponse('', 200)\r\n```\r\n\r\n**Environment**\r\n- OS: ubuntu 20.04\r\n- Python version: 3.9.13\r\n- Framework and version: fastapi==0.79.0\r\n- APM Server version: 8.3.0\r\n- Agent version: 6.11.0\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport random\nimport sys\n\nfrom elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH\nfrom elasticapm.utils import varmap\nfrom elasticapm.utils.encoding import keyword_field, shorten, to_unicode\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n\n__all__ = (\"BaseEvent\", \"Exception\", \"Message\")\n\nlogger = get_logger(\"elasticapm.events\")\n\n\nclass BaseEvent(object):\n @staticmethod\n def to_string(client, data):\n raise NotImplementedError\n\n @staticmethod\n def capture(client, **kwargs):\n return {}\n\n\nclass Exception(BaseEvent):\n \"\"\"\n Exceptions store the following metadata:\n\n - value: 'My exception value'\n - type: 'ClassName'\n - module '__builtin__' (i.e. __builtin__.TypeError)\n - frames: a list of serialized frames (see _get_traceback_frames)\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n exc = data[\"exception\"]\n if exc[\"value\"]:\n return \"%s: %s\" % (exc[\"type\"], exc[\"value\"])\n return exc[\"type\"]\n\n @staticmethod\n def get_hash(data):\n exc = data[\"exception\"]\n output = [exc[\"type\"]]\n for frame in data[\"stacktrace\"][\"frames\"]:\n output.append(frame[\"module\"])\n output.append(frame[\"function\"])\n return output\n\n @staticmethod\n def capture(client, exc_info=None, **kwargs):\n culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None\n new_exc_info = False\n if not exc_info or exc_info is True:\n new_exc_info = True\n exc_info = sys.exc_info()\n\n if exc_info == (None, None, None):\n raise ValueError(\"No exception found: capture_exception requires an active exception.\")\n\n try:\n exc_type, exc_value, exc_traceback = exc_info\n\n frames = get_stack_info(\n iter_traceback_frames(exc_traceback, config=client.config),\n with_locals=client.config.collect_local_variables in (\"errors\", \"all\"),\n library_frame_context_lines=client.config.source_lines_error_library_frames,\n in_app_frame_context_lines=client.config.source_lines_error_app_frames,\n include_paths_re=client.include_paths_re,\n exclude_paths_re=client.exclude_paths_re,\n locals_processor_func=lambda local_var: varmap(\n lambda k, val: shorten(\n val,\n list_length=client.config.local_var_list_max_length,\n string_length=client.config.local_var_max_length,\n dict_length=client.config.local_var_dict_max_length,\n ),\n local_var,\n ),\n )\n\n culprit = kwargs.get(\"culprit\", None) or get_culprit(\n frames, client.config.include_paths, client.config.exclude_paths\n )\n\n if hasattr(exc_type, \"__module__\"):\n exc_module = exc_type.__module__\n exc_type = exc_type.__name__\n else:\n exc_module = None\n exc_type = exc_type.__name__\n finally:\n if new_exc_info:\n try:\n del exc_info\n del exc_traceback\n except Exception as e:\n logger.exception(e)\n if \"message\" in kwargs:\n message = kwargs[\"message\"]\n else:\n message = \"%s: %s\" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)\n\n data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": 
keyword_field(culprit),\n \"exception\": {\n \"message\": message,\n \"type\": keyword_field(str(exc_type)),\n \"module\": keyword_field(str(exc_module)),\n \"stacktrace\": frames,\n },\n }\n if hasattr(exc_value, \"_elastic_apm_span_id\"):\n data[\"parent_id\"] = exc_value._elastic_apm_span_id\n del exc_value._elastic_apm_span_id\n depth = kwargs.get(\"_exc_chain_depth\", 0)\n if depth > EXCEPTION_CHAIN_MAX_DEPTH:\n return\n cause = exc_value.__cause__\n chained_context = exc_value.__context__\n\n # we follow the pattern of Python itself here and only capture the chained exception\n # if cause is not None and __suppress_context__ is False\n if chained_context and not (exc_value.__suppress_context__ and cause is None):\n if cause:\n chained_exc_type = type(cause)\n chained_exc_value = cause\n else:\n chained_exc_type = type(chained_context)\n chained_exc_value = chained_context\n chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__\n\n chained_cause = Exception.capture(\n client, exc_info=chained_exc_info, culprit=\"None\", _exc_chain_depth=depth + 1\n )\n if chained_cause:\n data[\"exception\"][\"cause\"] = [chained_cause[\"exception\"]]\n return data\n\n\nclass Message(BaseEvent):\n \"\"\"\n Messages store the following metadata:\n\n - message: 'My message from %s about %s'\n - params: ('foo', 'bar')\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n return data[\"log\"][\"message\"]\n\n @staticmethod\n def get_hash(data):\n msg = data[\"param_message\"]\n return [msg[\"message\"]]\n\n @staticmethod\n def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):\n if message:\n param_message = {\"message\": message}\n params = param_message.get(\"params\")\n message = param_message[\"message\"] % params if params else param_message[\"message\"]\n data = kwargs.get(\"data\", {})\n message_data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"log\": {\n \"level\": keyword_field(level or \"error\"),\n \"logger_name\": keyword_field(logger_name or \"__root__\"),\n \"message\": message,\n \"param_message\": keyword_field(param_message[\"message\"]),\n },\n }\n if isinstance(data.get(\"stacktrace\"), dict):\n message_data[\"log\"][\"stacktrace\"] = data[\"stacktrace\"][\"frames\"]\n if kwargs.get(\"exception\"):\n message_data[\"culprit\"] = kwargs[\"exception\"][\"culprit\"]\n message_data[\"exception\"] = kwargs[\"exception\"][\"exception\"]\n return message_data\n", "path": "elasticapm/events.py"}]}
| 3,083 | 221 |
gh_patches_debug_3449
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-608
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
README broken on PyPI (must be reStructuredText)
The [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use Markdown.
Solution A: Convert to reST
---------------------
1. Rename the current `README.md` to `README.rst`
1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))
1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))
Solution B: Process before Upload
-------------------
1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))
1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))
------------
Both solutions above will render a nicely formatted, HTML-styled package description on PyPI.
Quality Assurance
--------------
Optionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.
</issue>
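For reference, Solution B amounts to a few lines in `setup.py`. A sketch, assuming pypandoc and a pandoc binary are available on the machine building the release (older pypandoc releases expose `convert`, newer ones `convert_file`):

```python
try:
    from pypandoc import convert
    long_description = convert('README.md', 'rst')
except ImportError:
    # Fall back to the raw Markdown if pypandoc is not installed.
    long_description = open('README.md').read()

# then pass long_description=long_description to setup(...)
```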
<code>
[start of setup.py]
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 from os.path import abspath, dirname, join
5 from setuptools import find_packages, setup
6
7 def read_file(filename):
8 """Read the contents of a file located relative to setup.py"""
9 with open(join(abspath(dirname(__file__)), filename)) as thefile:
10 return thefile.read()
11
12 setup(
13 name='pip-tools',
14 use_scm_version=True,
15 url='https://github.com/jazzband/pip-tools/',
16 license='BSD',
17 author='Vincent Driessen',
18 author_email='[email protected]',
19 description=__doc__,
20 long_description=read_file('README.rst'),
21 packages=find_packages(exclude=['tests']),
22 setup_requires=['setuptools_scm'],
23 install_requires=[
24 'click>=6',
25 'first',
26 'six',
27 'setuptools'
28 ],
29 zip_safe=False,
30 entry_points={
31 'console_scripts': [
32 'pip-compile = piptools.scripts.compile:cli',
33 'pip-sync = piptools.scripts.sync:cli',
34 ],
35 },
36 platforms='any',
37 classifiers=[
38 'Development Status :: 5 - Production/Stable',
39 'Intended Audience :: Developers',
40 'Intended Audience :: System Administrators',
41 'License :: OSI Approved :: BSD License',
42 'Operating System :: OS Independent',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 2',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.4',
48 'Programming Language :: Python :: 3.5',
49 'Programming Language :: Python :: 3.6',
50 'Topic :: System :: Systems Administration',
51 ]
52 )
53
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
license='BSD',
author='Vincent Driessen',
author_email='[email protected]',
- description=__doc__,
+ description=__doc__.strip(),
long_description=read_file('README.rst'),
packages=find_packages(exclude=['tests']),
setup_requires=['setuptools_scm'],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n- description=__doc__,\n+ description=__doc__.strip(),\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n", "issue": "README broken on PyPI (must be reStructuredText)\nThe [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown.\r\n\r\nSolution A: Convert to reST\r\n---------------------\r\n\r\n1. Rename the current `README.md` to `README.rst`\r\n1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))\r\n1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))\r\n\r\nSolution B: Process before Upload\r\n-------------------\r\n\r\n1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))\r\n1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))\r\n\r\n------------\r\n\r\nBoth solutions above will render a nicely formatted, HTML-styled package description on PyPI.\r\n\r\nQuality Assurance\r\n--------------\r\n\r\nOptionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\nfrom setuptools import find_packages, setup\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\nsetup(\n name='pip-tools',\n use_scm_version=True,\n url='https://github.com/jazzband/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n 'click>=6',\n 'first',\n 'six',\n 'setuptools'\n ],\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'pip-compile = piptools.scripts.compile:cli',\n 'pip-sync = piptools.scripts.sync:cli',\n ],\n },\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]}
| 1,389 | 98 |
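The golden diff in the record above replaces `description=__doc__` with `description=__doc__.strip()`: the module docstring in pip-tools' setup.py begins and ends with newlines, which makes the raw `__doc__` value a poor one-line package summary. The snippet below is a minimal standalone sketch (not the actual pip-tools module) illustrating the difference.

```python
# Minimal sketch: a module docstring written on its own lines, as in the
# pip-tools setup.py quoted above, keeps its surrounding newlines in __doc__.
"""
pip-tools keeps your pinned dependencies fresh.
"""

print(repr(__doc__))          # '\npip-tools keeps your pinned dependencies fresh.\n'
print(repr(__doc__.strip()))  # 'pip-tools keeps your pinned dependencies fresh.'
```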
gh_patches_debug_21155
|
rasdani/github-patches
|
git_diff
|
kedro-org__kedro-1182
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ExcelDataSet not working after pandas 1.4.0 release (python 3.8)
## Description
On a fresh new project, after installing the extra dependency `kedro[pandas.ExcelDataSet]`, the user receives a runtime error when trying to load an xlsx file dataset via the data catalog. The cause of the issue seems to be an incompatibility between the new pandas `v1.4.0` and `xlrd~=1.0`, the latter being enforced by kedro `0.17.6`.
## Context
The error occurred while I was following the exact steps of the spaceflights tutorial. In order to load the `shuttles` dataset (stored as an xlsx file), I was instructed to install the extra dependency `kedro[pandas.ExcelDataSet]`. After installing the dependency and trying to load the dataset via the catalog, I received the following error: `Pandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).`
After some debugging, I discovered that pandas released version `1.4.0` two days ago, and it bumped the minimum version of the optional dependency xlrd to `2.0.1` ([source](https://pandas.pydata.org/docs/whatsnew/v1.4.0.html#increased-minimum-versions-for-dependencies), [source2](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L40)). As kedro enforces `xlrd~=1.0` via the extra dependency `kedro[pandas.ExcelDataSet]` ([source](https://github.com/kedro-org/kedro/blob/0.17.6/setup.py#L75)), pandas checks that the currently installed version of xlrd is `1.2.0` and raises the error before importing the package ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L133-L152)).
I repeated the same steps using python 3.7, but because pandas 1.4.0 only supports python 3.8+ ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/setup.cfg#L40)), the installed version of pandas was 1.3.5. When I tried to load the dataset, I received no errors (although I got a future warning saying xlrd will not support xlsx files in version >= 2.0)
## Steps to Reproduce
1. Create a virtualenv with python 3.8 and activate it
2. Run `pip install kedro==0.17.6`
3. Run `kedro new`
4. `cd` to project dir
5. Run `kedro install`
6. Replace the line `kedro==0.17.6` with `kedro[pandas.ExcelDataSet]==0.17.6` in src/requirements.in
7. Run `kedro build-reqs && kedro install`
8. Add file `shuttles.xlsx` (from spaceflights tutorial) to folder `data/01_raw/`
9. Add the dataset information to `catalog.yml`:
```yml
shuttles:
type: pandas.ExcelDataSet
filepath: data/01_raw/shuttles.xlsx
```
10. Run `kedro ipython`
11. Run python code: `shuttles = catalog.load("shuttles")`
## Expected Result
The xlsx file should've been loaded into memory as a pandas Dataframe
## Actual Result
I received the following error:
```
DataSetError: Failed while loading data from data set ExcelDataSet(filepath=/.../data/01_raw/shuttles.xlsx, load_args={'engine': xlrd}, protocol=file, save_args={'index': False}, writer_args={'engine': xlsxwriter}).
Pandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).
```
## Your Environment
* Kedro version used (`pip show kedro` or `kedro -V`): 0.17.6
* Python version used (`python -V`): 3.8.12
* Operating system and version: WSL: Ubuntu 20.04
* Pandas version: 1.4.0
* xlrd version: 1.2.0
</issue>
<code>
[start of setup.py]
1 import re
2 from codecs import open
3 from glob import glob
4 from itertools import chain
5 from os import path
6
7 from setuptools import find_packages, setup
8
9 name = "kedro"
10 here = path.abspath(path.dirname(__file__))
11
12
13 PANDAS = "pandas>=0.24"
14 SPARK = "pyspark>=2.2, <4.0"
15 HDFS = "hdfs>=2.5.8, <3.0"
16 S3FS = "s3fs>=0.3.0, <0.5"
17
18 # get package version
19 with open(path.join(here, name, "__init__.py"), encoding="utf-8") as f:
20 result = re.search(r'__version__ = ["\']([^"\']+)', f.read())
21
22 if not result:
23 raise ValueError("Can't find the version in kedro/__init__.py")
24
25 version = result.group(1)
26
27 # get the dependencies and installs
28 with open("requirements.txt", encoding="utf-8") as f:
29 requires = [x.strip() for x in f if x.strip()]
30
31 # get test dependencies and installs
32 with open("test_requirements.txt", encoding="utf-8") as f:
33 test_requires = [x.strip() for x in f if x.strip() and not x.startswith("-r")]
34
35
36 # Get the long description from the README file
37 with open(path.join(here, "README.md"), encoding="utf-8") as f:
38 readme = f.read()
39
40 doc_html_files = [
41 name.replace("kedro/", "", 1)
42 for name in glob("kedro/framework/html/**/*", recursive=True)
43 ]
44
45 template_files = []
46 for pattern in ["**/*", "**/.*", "**/.*/**", "**/.*/.**"]:
47 template_files.extend(
48 [
49 name.replace("kedro/", "", 1)
50 for name in glob("kedro/templates/" + pattern, recursive=True)
51 ]
52 )
53
54
55 def _collect_requirements(requires):
56 return sorted(set(chain.from_iterable(requires.values())))
57
58
59 api_require = {"api.APIDataSet": ["requests~=2.20"]}
60 biosequence_require = {"biosequence.BioSequenceDataSet": ["biopython~=1.73"]}
61 dask_require = {
62 "dask.ParquetDataSet": [
63 "dask>=2021.10.0, <2022.01; python_version > '3.6'",
64 "dask[complete]~=2.6; python_version == '3.6'",
65 ]
66 }
67 geopandas_require = {
68 "geopandas.GeoJSONDataSet": ["geopandas>=0.6.0, <1.0", "pyproj>=2.2.0, <3.0"]
69 }
70 matplotlib_require = {"matplotlib.MatplotlibWriter": ["matplotlib>=3.0.3, <4.0"]}
71 holoviews_require = {"holoviews.HoloviewsWriter": ["holoviews~=1.13.0"]}
72 networkx_require = {"networkx.NetworkXDataSet": ["networkx~=2.4"]}
73 pandas_require = {
74 "pandas.CSVDataSet": [PANDAS],
75 "pandas.ExcelDataSet": [PANDAS, "xlrd~=1.0", "xlsxwriter~=1.0"],
76 "pandas.AppendableExcelDataSet": [PANDAS, "openpyxl>=3.0.3, <4.0"],
77 "pandas.FeatherDataSet": [PANDAS],
78 "pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
79 "pandas.GBQQueryDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
80 "pandas.HDFDataSet": [PANDAS, "tables~=3.6.0; platform_system == 'Windows'",
81 "tables~=3.6; platform_system != 'Windows'"],
82 "pandas.JSONDataSet": [PANDAS],
83 "pandas.ParquetDataSet": [PANDAS, "pyarrow>=1.0, <7.0"],
84 "pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"],
85 "pandas.SQLQueryDataSet": [PANDAS, "SQLAlchemy~=1.2"],
86 "pandas.GenericDataSet": [PANDAS],
87 }
88 pillow_require = {"pillow.ImageDataSet": ["Pillow~=8.0"]}
89 plotly_require = {
90 "plotly.PlotlyDataSet": [PANDAS, "plotly>=4.8.0, <6.0"],
91 "plotly.JSONDataSet": ["plotly>=4.8.0, <6.0"],
92 }
93 spark_require = {
94 "spark.SparkDataSet": [SPARK, HDFS, S3FS],
95 "spark.SparkHiveDataSet": [SPARK, HDFS, S3FS],
96 "spark.SparkJDBCDataSet": [SPARK, HDFS, S3FS],
97 "spark.DeltaTableDataSet": [SPARK, HDFS, S3FS, "delta-spark~=1.0"],
98 }
99 tensorflow_required = {
100 "tensorflow.TensorflowModelDataset": [
101 # currently only TensorFlow V2 supported for saving and loading.
102 # V1 requires HDF5 and serializes differently
103 "tensorflow~=2.0"
104 ]
105 }
106 yaml_require = {"yaml.YAMLDataSet": [PANDAS, "PyYAML>=4.2, <6.0"]}
107
108 extras_require = {
109 "api": _collect_requirements(api_require),
110 "biosequence": _collect_requirements(biosequence_require),
111 "dask": _collect_requirements(dask_require),
112 "docs": [
113 "docutils==0.16",
114 "sphinx~=3.4.3",
115 "sphinx_rtd_theme==0.4.1",
116 "nbsphinx==0.8.1",
117 "nbstripout~=0.4",
118 "recommonmark==0.7.1",
119 "sphinx-autodoc-typehints==1.11.1",
120 "sphinx_copybutton==0.3.1",
121 "ipykernel>=5.3, <7.0",
122 ],
123 "geopandas": _collect_requirements(geopandas_require),
124 "ipython": ["ipython~=7.10"],
125 "matplotlib": _collect_requirements(matplotlib_require),
126 "holoviews": _collect_requirements(holoviews_require),
127 "networkx": _collect_requirements(networkx_require),
128 "notebook_templates": ["nbconvert>=5.3.1, <6.0", "nbformat~=4.4"],
129 "pandas": _collect_requirements(pandas_require),
130 "pillow": _collect_requirements(pillow_require),
131 "plotly": _collect_requirements(plotly_require),
132 "profilers": ["memory_profiler>=0.50.0, <1.0"],
133 "spark": _collect_requirements(spark_require),
134 "tensorflow": _collect_requirements(tensorflow_required),
135 "yaml": _collect_requirements(yaml_require),
136 **api_require,
137 **biosequence_require,
138 **dask_require,
139 **geopandas_require,
140 **matplotlib_require,
141 **holoviews_require,
142 **networkx_require,
143 **pandas_require,
144 **pillow_require,
145 **plotly_require,
146 **spark_require,
147 **tensorflow_required,
148 **yaml_require,
149 }
150
151 extras_require["all"] = _collect_requirements(extras_require)
152
153 setup(
154 name=name,
155 version=version,
156 description="Kedro helps you build production-ready data and analytics pipelines",
157 license="Apache Software License (Apache 2.0)",
158 long_description=readme,
159 long_description_content_type="text/markdown",
160 url="https://github.com/kedro-org/kedro",
161 python_requires=">=3.6, <3.9",
162 packages=find_packages(exclude=["docs*", "tests*", "tools*", "features*"]),
163 include_package_data=True,
164 tests_require=test_requires,
165 install_requires=requires,
166 author="Kedro",
167 entry_points={"console_scripts": ["kedro = kedro.framework.cli:main"]},
168 package_data={
169 name: ["py.typed", "test_requirements.txt"] + template_files + doc_html_files
170 },
171 zip_safe=False,
172 keywords="pipelines, machine learning, data pipelines, data science, data engineering",
173 classifiers=[
174 "Development Status :: 4 - Beta",
175 "Programming Language :: Python :: 3.6",
176 "Programming Language :: Python :: 3.7",
177 "Programming Language :: Python :: 3.8",
178 ],
179 extras_require=extras_require,
180 )
181
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
here = path.abspath(path.dirname(__file__))
-PANDAS = "pandas>=0.24"
+PANDAS = "pandas>=0.24, <1.4"
SPARK = "pyspark>=2.2, <4.0"
HDFS = "hdfs>=2.5.8, <3.0"
S3FS = "s3fs>=0.3.0, <0.5"
@@ -77,8 +77,11 @@
"pandas.FeatherDataSet": [PANDAS],
"pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
"pandas.GBQQueryDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
- "pandas.HDFDataSet": [PANDAS, "tables~=3.6.0; platform_system == 'Windows'",
- "tables~=3.6; platform_system != 'Windows'"],
+ "pandas.HDFDataSet": [
+ PANDAS,
+ "tables~=3.6.0; platform_system == 'Windows'",
+ "tables~=3.6; platform_system != 'Windows'",
+ ],
"pandas.JSONDataSet": [PANDAS],
"pandas.ParquetDataSet": [PANDAS, "pyarrow>=1.0, <7.0"],
"pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n here = path.abspath(path.dirname(__file__))\n \n \n-PANDAS = \"pandas>=0.24\"\n+PANDAS = \"pandas>=0.24, <1.4\"\n SPARK = \"pyspark>=2.2, <4.0\"\n HDFS = \"hdfs>=2.5.8, <3.0\"\n S3FS = \"s3fs>=0.3.0, <0.5\"\n@@ -77,8 +77,11 @@\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.GBQQueryDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n- \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6.0; platform_system == 'Windows'\",\n- \"tables~=3.6; platform_system != 'Windows'\"],\n+ \"pandas.HDFDataSet\": [\n+ PANDAS,\n+ \"tables~=3.6.0; platform_system == 'Windows'\",\n+ \"tables~=3.6; platform_system != 'Windows'\",\n+ ],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n", "issue": "ExcelDataSet not working after pandas 1.4.0 release (python 3.8)\n## Description\r\n\r\nOn a fresh new project, after installing the extra dependency `kedro[pandas.ExcelDataSet]`, the user receives a runtime error when trying to load a xslx file dataset via the data catalog. The cause of the issue seems to be an incompatibility with the new pandas `v1.4.0` and `xlrd~=1.0`, the latter being enforced by kedro `17.6.0`.\r\n\r\n## Context\r\n\r\nThe error occurred while I was following the exact steps of the spaceflights tutorial. In order to load the `shuttles` dataset (stored as a xslx file), I was instructed to install the extra dependency `kedro[pandas.ExcelDataSet]`. After installing the dependency and trying to load the dataset via the catalog, I received the following error: `Pandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).`\r\n\r\nAfter some debugging, I discovered that pandas released version `1.4.0` two days ago, and it bumped the mininum version of optional dependency xlrd to `2.0.1` ([source](https://pandas.pydata.org/docs/whatsnew/v1.4.0.html#increased-minimum-versions-for-dependencies), [source2](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L40)). As kedro enforces `xlrd~=1.0` via extra dependency `kedro[pandas.ExcelDataSet]` ([source](https://github.com/kedro-org/kedro/blob/0.17.6/setup.py#L75)), pandas checks that the currently installed version of xlrd is `1.2.0` and raises the error before importing the package ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L133-L152)).\r\n\r\nI repeated the same steps using python 3.7, but because pandas 1.4.0 only supports python 3.8+ ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/setup.cfg#L40)), the installed version of pandas was 1.3.5. When I tried to load the dataset, I received no errors (although I got a future warning saying xlrd will not support xlsx files in version >= 2.0)\r\n\r\n## Steps to Reproduce\r\n1. Create a virtualenv with python 3.8 and activate it\r\n2. Run `pip install kedro==0.17.6`\r\n3. Run `kedro new`\r\n4. `cd` to project dir\r\n5. Run `kedro install`\r\n6. Replace the line `kedro==0.17.6` with `kedro[pandas.ExcelDataSet]==0.17.6` in src/requirements.in\r\n7. Run `kedro build-reqs && kedro install`\r\n8. Add file `shuttles.xlsx` (from spaceflights tutorial) to folder `data/01_raw/`\r\n9. Add the dataset information to `catalog.yml`:\r\n ```yml\r\n shuttles:\r\n type: pandas.ExcelDataSet\r\n filepath: data/01_raw/shuttles.xlsx\r\n ```\r\n10. 
Run `kedro ipython`\r\n11. Run python code: `shuttles = catalog.load(\"shuttles\")`\r\n\r\n## Expected Result\r\nThe xlsx file should've been loaded into memory as a pandas Dataframe\r\n\r\n## Actual Result\r\nI received the following error:\r\n\r\n```\r\nDataSetError: Failed while loading data from data set ExcelDataSet(filepath=/.../data/01_raw/shuttles.xlsx, load_args={'engine': xlrd}, protocol=file, save_args={'index': False}, writer_args={'engine': xlsxwriter}).\r\nPandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).\r\n```\r\n\r\n## Your Environment\r\n\r\n* Kedro version used (`pip show kedro` or `kedro -V`): 0.17.6\r\n* Python version used (`python -V`): 3.8.12\r\n* Operating system and version: WSL: Ubuntu 20.04\r\n* Pandas version: 1.4.0\r\n* xlrd version: 1.2.0\r\n\n", "before_files": [{"content": "import re\nfrom codecs import open\nfrom glob import glob\nfrom itertools import chain\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nname = \"kedro\"\nhere = path.abspath(path.dirname(__file__))\n\n\nPANDAS = \"pandas>=0.24\"\nSPARK = \"pyspark>=2.2, <4.0\"\nHDFS = \"hdfs>=2.5.8, <3.0\"\nS3FS = \"s3fs>=0.3.0, <0.5\"\n\n# get package version\nwith open(path.join(here, name, \"__init__.py\"), encoding=\"utf-8\") as f:\n result = re.search(r'__version__ = [\"\\']([^\"\\']+)', f.read())\n\n if not result:\n raise ValueError(\"Can't find the version in kedro/__init__.py\")\n\n version = result.group(1)\n\n# get the dependencies and installs\nwith open(\"requirements.txt\", encoding=\"utf-8\") as f:\n requires = [x.strip() for x in f if x.strip()]\n\n# get test dependencies and installs\nwith open(\"test_requirements.txt\", encoding=\"utf-8\") as f:\n test_requires = [x.strip() for x in f if x.strip() and not x.startswith(\"-r\")]\n\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n readme = f.read()\n\ndoc_html_files = [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/framework/html/**/*\", recursive=True)\n]\n\ntemplate_files = []\nfor pattern in [\"**/*\", \"**/.*\", \"**/.*/**\", \"**/.*/.**\"]:\n template_files.extend(\n [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/templates/\" + pattern, recursive=True)\n ]\n )\n\n\ndef _collect_requirements(requires):\n return sorted(set(chain.from_iterable(requires.values())))\n\n\napi_require = {\"api.APIDataSet\": [\"requests~=2.20\"]}\nbiosequence_require = {\"biosequence.BioSequenceDataSet\": [\"biopython~=1.73\"]}\ndask_require = {\n \"dask.ParquetDataSet\": [\n \"dask>=2021.10.0, <2022.01; python_version > '3.6'\",\n \"dask[complete]~=2.6; python_version == '3.6'\",\n ]\n}\ngeopandas_require = {\n \"geopandas.GeoJSONDataSet\": [\"geopandas>=0.6.0, <1.0\", \"pyproj>=2.2.0, <3.0\"]\n}\nmatplotlib_require = {\"matplotlib.MatplotlibWriter\": [\"matplotlib>=3.0.3, <4.0\"]}\nholoviews_require = {\"holoviews.HoloviewsWriter\": [\"holoviews~=1.13.0\"]}\nnetworkx_require = {\"networkx.NetworkXDataSet\": [\"networkx~=2.4\"]}\npandas_require = {\n \"pandas.CSVDataSet\": [PANDAS],\n \"pandas.ExcelDataSet\": [PANDAS, \"xlrd~=1.0\", \"xlsxwriter~=1.0\"],\n \"pandas.AppendableExcelDataSet\": [PANDAS, \"openpyxl>=3.0.3, <4.0\"],\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.GBQQueryDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6.0; platform_system == 'Windows'\",\n 
\"tables~=3.6; platform_system != 'Windows'\"],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n}\npillow_require = {\"pillow.ImageDataSet\": [\"Pillow~=8.0\"]}\nplotly_require = {\n \"plotly.PlotlyDataSet\": [PANDAS, \"plotly>=4.8.0, <6.0\"],\n \"plotly.JSONDataSet\": [\"plotly>=4.8.0, <6.0\"],\n}\nspark_require = {\n \"spark.SparkDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkHiveDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkJDBCDataSet\": [SPARK, HDFS, S3FS],\n \"spark.DeltaTableDataSet\": [SPARK, HDFS, S3FS, \"delta-spark~=1.0\"],\n}\ntensorflow_required = {\n \"tensorflow.TensorflowModelDataset\": [\n # currently only TensorFlow V2 supported for saving and loading.\n # V1 requires HDF5 and serializes differently\n \"tensorflow~=2.0\"\n ]\n}\nyaml_require = {\"yaml.YAMLDataSet\": [PANDAS, \"PyYAML>=4.2, <6.0\"]}\n\nextras_require = {\n \"api\": _collect_requirements(api_require),\n \"biosequence\": _collect_requirements(biosequence_require),\n \"dask\": _collect_requirements(dask_require),\n \"docs\": [\n \"docutils==0.16\",\n \"sphinx~=3.4.3\",\n \"sphinx_rtd_theme==0.4.1\",\n \"nbsphinx==0.8.1\",\n \"nbstripout~=0.4\",\n \"recommonmark==0.7.1\",\n \"sphinx-autodoc-typehints==1.11.1\",\n \"sphinx_copybutton==0.3.1\",\n \"ipykernel>=5.3, <7.0\",\n ],\n \"geopandas\": _collect_requirements(geopandas_require),\n \"ipython\": [\"ipython~=7.10\"],\n \"matplotlib\": _collect_requirements(matplotlib_require),\n \"holoviews\": _collect_requirements(holoviews_require),\n \"networkx\": _collect_requirements(networkx_require),\n \"notebook_templates\": [\"nbconvert>=5.3.1, <6.0\", \"nbformat~=4.4\"],\n \"pandas\": _collect_requirements(pandas_require),\n \"pillow\": _collect_requirements(pillow_require),\n \"plotly\": _collect_requirements(plotly_require),\n \"profilers\": [\"memory_profiler>=0.50.0, <1.0\"],\n \"spark\": _collect_requirements(spark_require),\n \"tensorflow\": _collect_requirements(tensorflow_required),\n \"yaml\": _collect_requirements(yaml_require),\n **api_require,\n **biosequence_require,\n **dask_require,\n **geopandas_require,\n **matplotlib_require,\n **holoviews_require,\n **networkx_require,\n **pandas_require,\n **pillow_require,\n **plotly_require,\n **spark_require,\n **tensorflow_required,\n **yaml_require,\n}\n\nextras_require[\"all\"] = _collect_requirements(extras_require)\n\nsetup(\n name=name,\n version=version,\n description=\"Kedro helps you build production-ready data and analytics pipelines\",\n license=\"Apache Software License (Apache 2.0)\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/kedro-org/kedro\",\n python_requires=\">=3.6, <3.9\",\n packages=find_packages(exclude=[\"docs*\", \"tests*\", \"tools*\", \"features*\"]),\n include_package_data=True,\n tests_require=test_requires,\n install_requires=requires,\n author=\"Kedro\",\n entry_points={\"console_scripts\": [\"kedro = kedro.framework.cli:main\"]},\n package_data={\n name: [\"py.typed\", \"test_requirements.txt\"] + template_files + doc_html_files\n },\n zip_safe=False,\n keywords=\"pipelines, machine learning, data pipelines, data science, data engineering\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language 
:: Python :: 3.8\",\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
| 3,853 | 379 |
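The fix recorded above caps pandas below 1.4 so that kedro's pinned `xlrd~=1.0` stays within pandas' supported range. The snippet below is a hypothetical re-creation of the optional-dependency version check that produces the reported error; pandas' real implementation lives in `pandas.compat._optional` and is linked in the issue.

```python
# Hypothetical re-creation of pandas' minimum-version check for optional
# dependencies; the version numbers are taken from the issue above.
from packaging.version import Version

installed_xlrd = Version("1.2.0")  # what kedro's "xlrd~=1.0" pin resolves to
minimum_xlrd = Version("2.0.1")    # minimum required by pandas 1.4.0

if installed_xlrd < minimum_xlrd:
    raise ImportError(
        f"Pandas requires version '{minimum_xlrd}' or newer of 'xlrd' "
        f"(version '{installed_xlrd}' currently installed)."
    )
```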
gh_patches_debug_20070
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-5020
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Potential new `datetime` issues
### Describe the issue
See CI failures
* https://github.com/ietf-tools/datatracker/actions/runs/3899998769/jobs/6660186805
* https://github.com/ietf-tools/datatracker/actions/runs/3900011570/jobs/6660211516
There is also this one, which is different but possibly related:
* https://github.com/ietf-tools/datatracker/actions/runs/3900217767/jobs/6660629415
CC @jennifer-richards
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of ietf/iesg/agenda.py]
1 # Copyright The IETF Trust 2013-2020, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4
5 # utilities for constructing agendas for IESG telechats
6
7 import io
8 import datetime
9 from collections import OrderedDict
10
11 from django.conf import settings
12 from django.http import Http404
13
14 import debug # pyflakes:ignore
15
16 from ietf.doc.models import Document, LastCallDocEvent, ConsensusDocEvent
17 from ietf.doc.utils_search import fill_in_telechat_date
18 from ietf.iesg.models import TelechatDate, TelechatAgendaItem
19 from ietf.review.utils import review_assignments_to_list_for_docs
20 from ietf.utils.timezone import date_today
21
22 def get_agenda_date(date=None):
23 if not date:
24 try:
25 return TelechatDate.objects.active().order_by('date')[0].date
26 except IndexError:
27 return date_today()
28 else:
29 try:
30 return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, "%Y-%m-%d").date()).date
31 except (ValueError, TelechatDate.DoesNotExist):
32 raise Http404
33
34 def get_doc_section(doc):
35 if doc.type_id == 'draft':
36 if doc.intended_std_level_id in ["bcp", "ds", "ps", "std"]:
37 s = "2"
38 else:
39 s = "3"
40
41 g = doc.group_acronym()
42 if g and str(g) != 'none':
43 s += ".1"
44 elif s == "3" and doc.stream_id in ("ise","irtf"):
45 s += ".3"
46 else:
47 s += ".2"
48 if doc.get_state_slug() != "rfc" and doc.get_state_slug('draft-iesg') not in ("lc", "writeupw", "goaheadw", "iesg-eva", "defer", "approved", "ann", "rfcqueue", "pub"):
49 s += ".3"
50 elif doc.returning_item():
51 s += ".2"
52 else:
53 s += ".1"
54
55 elif doc.type_id == 'charter':
56 s = "4"
57 if doc.group.state_id in ('active', 'dormant'):
58 s += ".2"
59 else:
60 s += ".1"
61 if doc.get_state_slug() in ('extrev', 'iesgrev'):
62 s += '.2'
63 else:
64 s += '.1'
65
66 elif doc.type_id == 'statchg':
67 protocol_action = False
68 for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')):
69 if relation.relationship_id in ('tops','tois') or relation.target.document.std_level_id in ('std','ds','ps'):
70 protocol_action = True
71 if protocol_action:
72 s = "2.3"
73 else:
74 s = "3.3"
75 if doc.get_state_slug() not in ("iesgeval", "defer", "appr-pr", "appr-pend", "appr-sent"):
76 s += ".3"
77 elif doc.returning_item():
78 s += ".2"
79 else:
80 s += ".1"
81
82 elif doc.type_id == 'conflrev':
83 if not doc.get_state('conflrev') or doc.get_state('conflrev').slug not in ('adrev','iesgeval','appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent','defer'):
84 s = "3.4.3"
85 elif doc.returning_item():
86 s = "3.4.2"
87 else:
88 s = "3.4.1"
89
90 return s
91
92 def agenda_sections():
93 return OrderedDict([
94 ('1', {'title':"Administrivia"}),
95 ('1.1', {'title':"Roll call"}),
96 ('1.2', {'title':"Bash the agenda"}),
97 ('1.3', {'title':"Approval of the minutes of past telechats"}),
98 ('1.4', {'title':"List of remaining action items from last telechat"}),
99 ('2', {'title':"Protocol actions"}),
100 ('2.1', {'title':"WG submissions"}),
101 ('2.1.1', {'title':"New items", 'docs': []}),
102 ('2.1.2', {'title':"Returning items", 'docs':[]}),
103 ('2.1.3', {'title':"For action", 'docs':[]}),
104 ('2.2', {'title':"Individual submissions"}),
105 ('2.2.1', {'title':"New items", 'docs':[]}),
106 ('2.2.2', {'title':"Returning items", 'docs':[]}),
107 ('2.2.3', {'title':"For action", 'docs':[]}),
108 ('2.3', {'title':"Status changes"}),
109 ('2.3.1', {'title':"New items", 'docs':[]}),
110 ('2.3.2', {'title':"Returning items", 'docs':[]}),
111 ('2.3.3', {'title':"For action", 'docs':[]}),
112 ('3', {'title':"Document actions"}),
113 ('3.1', {'title':"WG submissions"}),
114 ('3.1.1', {'title':"New items", 'docs':[]}),
115 ('3.1.2', {'title':"Returning items", 'docs':[]}),
116 ('3.1.3', {'title':"For action", 'docs':[]}),
117 ('3.2', {'title':"Individual submissions via AD"}),
118 ('3.2.1', {'title':"New items", 'docs':[]}),
119 ('3.2.2', {'title':"Returning items", 'docs':[]}),
120 ('3.2.3', {'title':"For action", 'docs':[]}),
121 ('3.3', {'title':"Status changes"}),
122 ('3.3.1', {'title':"New items", 'docs':[]}),
123 ('3.3.2', {'title':"Returning items", 'docs':[]}),
124 ('3.3.3', {'title':"For action", 'docs':[]}),
125 ('3.4', {'title':"IRTF and Independent Submission stream documents"}),
126 ('3.4.1', {'title':"New items", 'docs':[]}),
127 ('3.4.2', {'title':"Returning items", 'docs':[]}),
128 ('3.4.3', {'title':"For action", 'docs':[]}),
129 ('4', {'title':"Working Group actions"}),
130 ('4.1', {'title':"WG creation"}),
131 ('4.1.1', {'title':"Proposed for IETF review", 'docs':[]}),
132 ('4.1.2', {'title':"Proposed for approval", 'docs':[]}),
133 ('4.2', {'title':"WG rechartering"}),
134 ('4.2.1', {'title':"Under evaluation for IETF review", 'docs':[]}),
135 ('4.2.2', {'title':"Proposed for approval", 'docs':[]}),
136 ('5', {'title':"IAB news we can use"}),
137 ('6', {'title':"Management issues"}),
138 ('7', {'title':"Any Other Business (WG News, New Proposals, etc.)"}),
139 ])
140
141 def fill_in_agenda_administrivia(date, sections):
142 extra_info_files = (
143 ("1.1", "roll_call", settings.IESG_ROLL_CALL_FILE),
144 ("1.3", "minutes", settings.IESG_MINUTES_FILE),
145 ("1.4", "action_items", settings.IESG_TASK_FILE),
146 )
147
148 for s, key, filename in extra_info_files:
149 try:
150 with io.open(filename, 'r', encoding='utf-8', errors='replace') as f:
151 t = f.read().strip()
152 except IOError:
153 t = "(Error reading %s)" % filename
154
155 sections[s]["text"] = t
156
157 def fill_in_agenda_docs(date, sections, docs=None):
158 if not docs:
159 docs = Document.objects.filter(docevent__telechatdocevent__telechat_date=date)
160 docs = docs.select_related("stream", "group").distinct()
161 fill_in_telechat_date(docs)
162
163 review_assignments_for_docs = review_assignments_to_list_for_docs(docs)
164
165 for doc in docs:
166 if doc.telechat_date() != date:
167 continue
168
169 if not hasattr(doc, 'balloting_started'):
170 e = doc.latest_event(type="started_iesg_process")
171 doc.balloting_started = e.time if e else datetime.datetime.min
172
173 if doc.type_id == "draft":
174 s = doc.get_state("draft-iana-review")
175 if s: # and s.slug in ("not-ok", "changed", "need-rev"):
176 doc.iana_review_state = str(s)
177
178 if doc.get_state_slug("draft-iesg") == "lc":
179 e = doc.latest_event(LastCallDocEvent, type="sent_last_call")
180 if e:
181 doc.lastcall_expires = e.expires
182
183 if doc.stream_id in ("ietf", "irtf", "iab"):
184 doc.consensus = "Unknown"
185 e = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
186 if e and (e.consensus != None):
187 doc.consensus = "Yes" if e.consensus else "No"
188
189 doc.review_assignments = review_assignments_for_docs.get(doc.name, [])
190 elif doc.type_id == "conflrev":
191 doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document
192 elif doc.type_id == "charter":
193 pass
194
195 number = get_doc_section(doc)
196 if number: # and num in sections
197 sections[number]["docs"].append(doc)
198
199 # prune empty "For action" sections
200 empty_for_action = [n for n, section in sections.items()
201 if section["title"] == "For action" and not section["docs"]]
202 for num in empty_for_action:
203 del sections[num]
204
205 # Be careful to keep this the same as what's used in agenda_documents
206 for s in sections.values():
207 if "docs" in s:
208 s["docs"].sort(key=lambda d: d.balloting_started)
209
210 def fill_in_agenda_management_issues(date, sections):
211 s = "6.%s"
212 for i, item in enumerate(TelechatAgendaItem.objects.filter(type=3).order_by('id'), start=1):
213 sections[s % i] = { "title": item.title, "text": item.text }
214
215 def agenda_data(date=None):
216 """Return a dict with the different IESG telechat agenda components."""
217 date = get_agenda_date(date)
218 sections = agenda_sections()
219
220 fill_in_agenda_administrivia(date, sections)
221 fill_in_agenda_docs(date, sections)
222 fill_in_agenda_management_issues(date, sections)
223
224 return { 'date': date.isoformat(), 'sections': sections }
[end of ietf/iesg/agenda.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ietf/iesg/agenda.py b/ietf/iesg/agenda.py
--- a/ietf/iesg/agenda.py
+++ b/ietf/iesg/agenda.py
@@ -17,7 +17,7 @@
from ietf.doc.utils_search import fill_in_telechat_date
from ietf.iesg.models import TelechatDate, TelechatAgendaItem
from ietf.review.utils import review_assignments_to_list_for_docs
-from ietf.utils.timezone import date_today
+from ietf.utils.timezone import date_today, make_aware
def get_agenda_date(date=None):
if not date:
@@ -26,8 +26,9 @@
except IndexError:
return date_today()
else:
+ parsed_date = make_aware(datetime.datetime.strptime(date, "%Y-%m-%d"), settings.TIME_ZONE).date()
try:
- return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, "%Y-%m-%d").date()).date
+ return TelechatDate.objects.active().get(date=parsed_date).date
except (ValueError, TelechatDate.DoesNotExist):
raise Http404
|
{"golden_diff": "diff --git a/ietf/iesg/agenda.py b/ietf/iesg/agenda.py\n--- a/ietf/iesg/agenda.py\n+++ b/ietf/iesg/agenda.py\n@@ -17,7 +17,7 @@\n from ietf.doc.utils_search import fill_in_telechat_date\n from ietf.iesg.models import TelechatDate, TelechatAgendaItem\n from ietf.review.utils import review_assignments_to_list_for_docs\n-from ietf.utils.timezone import date_today\n+from ietf.utils.timezone import date_today, make_aware\n \n def get_agenda_date(date=None):\n if not date:\n@@ -26,8 +26,9 @@\n except IndexError:\n return date_today()\n else:\n+ parsed_date = make_aware(datetime.datetime.strptime(date, \"%Y-%m-%d\"), settings.TIME_ZONE).date()\n try:\n- return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, \"%Y-%m-%d\").date()).date\n+ return TelechatDate.objects.active().get(date=parsed_date).date\n except (ValueError, TelechatDate.DoesNotExist):\n raise Http404\n", "issue": "Potential new `datetime` issues\n### Describe the issue\r\n\r\nSee CI failures\r\n* https://github.com/ietf-tools/datatracker/actions/runs/3899998769/jobs/6660186805\r\n* https://github.com/ietf-tools/datatracker/actions/runs/3900011570/jobs/6660211516\r\n\r\nThere is also this one, which is different but possibly related:\r\n* https://github.com/ietf-tools/datatracker/actions/runs/3900217767/jobs/6660629415\r\n\r\nCC @jennifer-richards \r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2013-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\n# utilities for constructing agendas for IESG telechats\n\nimport io\nimport datetime\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.http import Http404\n\nimport debug # pyflakes:ignore\n\nfrom ietf.doc.models import Document, LastCallDocEvent, ConsensusDocEvent\nfrom ietf.doc.utils_search import fill_in_telechat_date\nfrom ietf.iesg.models import TelechatDate, TelechatAgendaItem\nfrom ietf.review.utils import review_assignments_to_list_for_docs\nfrom ietf.utils.timezone import date_today\n\ndef get_agenda_date(date=None):\n if not date:\n try:\n return TelechatDate.objects.active().order_by('date')[0].date\n except IndexError:\n return date_today()\n else:\n try:\n return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, \"%Y-%m-%d\").date()).date\n except (ValueError, TelechatDate.DoesNotExist):\n raise Http404\n\ndef get_doc_section(doc):\n if doc.type_id == 'draft':\n if doc.intended_std_level_id in [\"bcp\", \"ds\", \"ps\", \"std\"]:\n s = \"2\"\n else:\n s = \"3\"\n\n g = doc.group_acronym()\n if g and str(g) != 'none':\n s += \".1\"\n elif s == \"3\" and doc.stream_id in (\"ise\",\"irtf\"):\n s += \".3\"\n else:\n s += \".2\"\n if doc.get_state_slug() != \"rfc\" and doc.get_state_slug('draft-iesg') not in (\"lc\", \"writeupw\", \"goaheadw\", \"iesg-eva\", \"defer\", \"approved\", \"ann\", \"rfcqueue\", \"pub\"):\n s += \".3\"\n elif doc.returning_item():\n s += \".2\"\n else:\n s += \".1\"\n\n elif doc.type_id == 'charter':\n s = \"4\"\n if doc.group.state_id in ('active', 'dormant'):\n s += \".2\"\n else:\n s += \".1\"\n if doc.get_state_slug() in ('extrev', 'iesgrev'):\n s += '.2'\n else:\n s += '.1'\n\n elif doc.type_id == 'statchg':\n protocol_action = False\n for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')):\n if 
relation.relationship_id in ('tops','tois') or relation.target.document.std_level_id in ('std','ds','ps'):\n protocol_action = True\n if protocol_action:\n s = \"2.3\"\n else:\n s = \"3.3\"\n if doc.get_state_slug() not in (\"iesgeval\", \"defer\", \"appr-pr\", \"appr-pend\", \"appr-sent\"):\n s += \".3\"\n elif doc.returning_item():\n s += \".2\"\n else:\n s += \".1\"\n\n elif doc.type_id == 'conflrev':\n if not doc.get_state('conflrev') or doc.get_state('conflrev').slug not in ('adrev','iesgeval','appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent','defer'):\n s = \"3.4.3\"\n elif doc.returning_item():\n s = \"3.4.2\"\n else:\n s = \"3.4.1\"\n\n return s\n\ndef agenda_sections():\n return OrderedDict([\n ('1', {'title':\"Administrivia\"}),\n ('1.1', {'title':\"Roll call\"}),\n ('1.2', {'title':\"Bash the agenda\"}),\n ('1.3', {'title':\"Approval of the minutes of past telechats\"}),\n ('1.4', {'title':\"List of remaining action items from last telechat\"}),\n ('2', {'title':\"Protocol actions\"}),\n ('2.1', {'title':\"WG submissions\"}),\n ('2.1.1', {'title':\"New items\", 'docs': []}),\n ('2.1.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.1.3', {'title':\"For action\", 'docs':[]}),\n ('2.2', {'title':\"Individual submissions\"}),\n ('2.2.1', {'title':\"New items\", 'docs':[]}),\n ('2.2.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.2.3', {'title':\"For action\", 'docs':[]}),\n ('2.3', {'title':\"Status changes\"}),\n ('2.3.1', {'title':\"New items\", 'docs':[]}),\n ('2.3.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.3.3', {'title':\"For action\", 'docs':[]}),\n ('3', {'title':\"Document actions\"}),\n ('3.1', {'title':\"WG submissions\"}),\n ('3.1.1', {'title':\"New items\", 'docs':[]}),\n ('3.1.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.1.3', {'title':\"For action\", 'docs':[]}),\n ('3.2', {'title':\"Individual submissions via AD\"}),\n ('3.2.1', {'title':\"New items\", 'docs':[]}),\n ('3.2.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.2.3', {'title':\"For action\", 'docs':[]}),\n ('3.3', {'title':\"Status changes\"}),\n ('3.3.1', {'title':\"New items\", 'docs':[]}),\n ('3.3.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.3.3', {'title':\"For action\", 'docs':[]}),\n ('3.4', {'title':\"IRTF and Independent Submission stream documents\"}),\n ('3.4.1', {'title':\"New items\", 'docs':[]}),\n ('3.4.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.4.3', {'title':\"For action\", 'docs':[]}),\n ('4', {'title':\"Working Group actions\"}),\n ('4.1', {'title':\"WG creation\"}),\n ('4.1.1', {'title':\"Proposed for IETF review\", 'docs':[]}),\n ('4.1.2', {'title':\"Proposed for approval\", 'docs':[]}),\n ('4.2', {'title':\"WG rechartering\"}),\n ('4.2.1', {'title':\"Under evaluation for IETF review\", 'docs':[]}),\n ('4.2.2', {'title':\"Proposed for approval\", 'docs':[]}),\n ('5', {'title':\"IAB news we can use\"}),\n ('6', {'title':\"Management issues\"}),\n ('7', {'title':\"Any Other Business (WG News, New Proposals, etc.)\"}),\n ])\n\ndef fill_in_agenda_administrivia(date, sections):\n extra_info_files = (\n (\"1.1\", \"roll_call\", settings.IESG_ROLL_CALL_FILE),\n (\"1.3\", \"minutes\", settings.IESG_MINUTES_FILE),\n (\"1.4\", \"action_items\", settings.IESG_TASK_FILE),\n )\n\n for s, key, filename in extra_info_files:\n try:\n with io.open(filename, 'r', encoding='utf-8', errors='replace') as f:\n t = f.read().strip()\n except IOError:\n t = \"(Error reading %s)\" % filename\n\n sections[s][\"text\"] = t\n\ndef 
fill_in_agenda_docs(date, sections, docs=None):\n if not docs:\n docs = Document.objects.filter(docevent__telechatdocevent__telechat_date=date)\n docs = docs.select_related(\"stream\", \"group\").distinct()\n fill_in_telechat_date(docs)\n\n review_assignments_for_docs = review_assignments_to_list_for_docs(docs)\n\n for doc in docs:\n if doc.telechat_date() != date:\n continue\n\n if not hasattr(doc, 'balloting_started'):\n e = doc.latest_event(type=\"started_iesg_process\")\n doc.balloting_started = e.time if e else datetime.datetime.min\n\n if doc.type_id == \"draft\":\n s = doc.get_state(\"draft-iana-review\")\n if s: # and s.slug in (\"not-ok\", \"changed\", \"need-rev\"):\n doc.iana_review_state = str(s)\n\n if doc.get_state_slug(\"draft-iesg\") == \"lc\":\n e = doc.latest_event(LastCallDocEvent, type=\"sent_last_call\")\n if e:\n doc.lastcall_expires = e.expires\n\n if doc.stream_id in (\"ietf\", \"irtf\", \"iab\"):\n doc.consensus = \"Unknown\"\n e = doc.latest_event(ConsensusDocEvent, type=\"changed_consensus\")\n if e and (e.consensus != None):\n doc.consensus = \"Yes\" if e.consensus else \"No\"\n\n doc.review_assignments = review_assignments_for_docs.get(doc.name, [])\n elif doc.type_id == \"conflrev\":\n doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document\n elif doc.type_id == \"charter\":\n pass\n\n number = get_doc_section(doc)\n if number: # and num in sections\n sections[number][\"docs\"].append(doc)\n\n # prune empty \"For action\" sections\n empty_for_action = [n for n, section in sections.items()\n if section[\"title\"] == \"For action\" and not section[\"docs\"]]\n for num in empty_for_action:\n del sections[num]\n\n # Be careful to keep this the same as what's used in agenda_documents\n for s in sections.values():\n if \"docs\" in s:\n s[\"docs\"].sort(key=lambda d: d.balloting_started)\n\ndef fill_in_agenda_management_issues(date, sections):\n s = \"6.%s\"\n for i, item in enumerate(TelechatAgendaItem.objects.filter(type=3).order_by('id'), start=1):\n sections[s % i] = { \"title\": item.title, \"text\": item.text }\n\ndef agenda_data(date=None):\n \"\"\"Return a dict with the different IESG telechat agenda components.\"\"\"\n date = get_agenda_date(date)\n sections = agenda_sections()\n\n fill_in_agenda_administrivia(date, sections)\n fill_in_agenda_docs(date, sections)\n fill_in_agenda_management_issues(date, sections)\n\n return { 'date': date.isoformat(), 'sections': sections }", "path": "ietf/iesg/agenda.py"}]}
| 3,708 | 257 |
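The golden diff above swaps the naive `datetime.strptime(...).date()` call for `make_aware(..., settings.TIME_ZONE).date()`, so the parsed agenda date is interpreted in the project time zone rather than whatever the process default happens to be. The snippet below is an illustrative standard-library sketch; the hard-coded zone name is an assumption for the example, and the real code uses the datatracker's own `make_aware` helper with `settings.TIME_ZONE`.

```python
# Standalone sketch of the idea behind the fix: parse the URL date string,
# attach a known time zone, then take the calendar date.
import datetime
from zoneinfo import ZoneInfo  # Python 3.9+

def parse_agenda_date(date_str: str, tz_name: str = "America/Los_Angeles") -> datetime.date:
    naive = datetime.datetime.strptime(date_str, "%Y-%m-%d")
    aware = naive.replace(tzinfo=ZoneInfo(tz_name))  # interpret in the given zone
    return aware.date()

print(parse_agenda_date("2023-01-12"))  # 2023-01-12
```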
gh_patches_debug_55973
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4994
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tests folder published in version 2.3.201
The wheel of version 2.3.201 contains the tests folder and pollutes the site-packages even though setup.py excludes it explicitly.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import json
3 import logging
4 import os
5 from importlib import util
6 from os import path
7 from pathlib import Path
8
9 from setuptools import setup, find_packages
10 from setuptools.command.build_py import build_py
11
12
13 class PreBuildCommand(build_py):
14 """Pre-build command"""
15
16 def transform_graph_yaml_to_json(self) -> None:
17 """Transforms YAML graph checks to JSON and copies them to build/lib"""
18
19 import yaml # can't be top-level, because it needs to be first installed via 'setup_requires'
20
21 graph_check_paths = (
22 "checkov/*/checks/graph_checks",
23 )
24 build_path = Path(self.build_lib)
25 src_path = Path()
26
27 for graph_check_path in graph_check_paths:
28 for yaml_file in src_path.glob(f"{graph_check_path}/**/*.yaml"):
29 json_file = (build_path / yaml_file).with_suffix(".json")
30 self.mkpath(str(json_file.parent))
31 json_file.write_text(json.dumps(yaml.safe_load(yaml_file.read_text())))
32
33 def run(self) -> None:
34 self.execute(self.transform_graph_yaml_to_json, ())
35 build_py.run(self)
36
37
38 # read the contents of your README file
39 this_directory = path.abspath(path.dirname(__file__))
40 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
41 long_description = f.read()
42
43 logger = logging.getLogger(__name__)
44 spec = util.spec_from_file_location(
45 "checkov.version", os.path.join("checkov", "version.py")
46 )
47 # noinspection PyUnresolvedReferences
48 mod = util.module_from_spec(spec)
49 spec.loader.exec_module(mod) # type: ignore
50 version = mod.version # type: ignore
51
52 setup(
53 cmdclass={
54 'build_py': PreBuildCommand,
55 },
56 setup_requires=[
57 "pyyaml",
58 ],
59 extras_require={
60 "dev": [
61 "pytest==5.3.1",
62 "coverage==5.5",
63 "coverage-badge",
64 "GitPython==3.1.7",
65 "bandit",
66 "jsonschema",
67 ]
68 },
69 install_requires=[
70 "bc-python-hcl2==0.3.51",
71 "bc-detect-secrets==1.4.21",
72 "bc-jsonpath-ng==1.5.9",
73 "deep-merge",
74 "tabulate",
75 "colorama",
76 "termcolor",
77 "junit-xml>=1.9",
78 "dpath==2.1.3",
79 "pyyaml>=5.4.1",
80 "boto3>=1.17",
81 "gitpython",
82 "jmespath",
83 "tqdm",
84 "update-checker",
85 "semantic-version",
86 "packaging",
87 "cloudsplaining>=0.4.3",
88 "networkx<2.7",
89 "igraph",
90 "dockerfile-parse",
91 "docker",
92 "configargparse",
93 "argcomplete",
94 "policyuniverse",
95 "typing-extensions>=4.1.0",
96 "importlib-metadata>=0.12",
97 "cachetools",
98 "cyclonedx-python-lib>=2.4.0,<4.0.0",
99 "packageurl-python",
100 "click>=8.0.0",
101 "aiohttp",
102 "aiodns",
103 "aiomultiprocess",
104 "jsonschema<5.0.0,>=4.6.0",
105 "prettytable>=3.0.0",
106 "pycep-parser==0.3.9",
107 "charset-normalizer",
108 "pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'",
109 "pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'",
110 "schema",
111 "requests>=2.27.0",
112 "yarl",
113 "openai",
114 ],
115 dependency_links=[], # keep it empty, needed for pipenv-setup
116 license="Apache License 2.0",
117 name="checkov",
118 version=version,
119 python_requires=">=3.7",
120 description="Infrastructure as code static analysis",
121 author="bridgecrew",
122 author_email="[email protected]",
123 url="https://github.com/bridgecrewio/checkov",
124 packages=find_packages(exclude=[
125 "dogfood_tests*",
126 "integration_tests*",
127 "performance_tests*"
128 "tests*",
129 ]),
130 include_package_data=True,
131 package_data={
132 "checkov": ["py.typed"],
133 "checkov.common.util.templates": ["*.jinja2"],
134 "checkov.ansible.checks.graph_checks": ["**/*.json"],
135 "checkov.arm.checks.graph_checks": ["**/*.json"],
136 "checkov.bicep.checks.graph_checks": ["**/*.json"],
137 "checkov.cloudformation.checks.graph_checks": ["**/*.json"],
138 "checkov.dockerfile.checks.graph_checks": ["**/*.json"],
139 "checkov.github_actions.checks.graph_checks": ["**/*.json"],
140 "checkov.kubernetes.checks.graph_checks": ["**/*.json"],
141 "checkov.terraform.checks.graph_checks": ["**/*.json"],
142 },
143 scripts=["bin/checkov", "bin/checkov.cmd"],
144 long_description=long_description,
145 long_description_content_type="text/markdown",
146 classifiers=[
147 "Environment :: Console",
148 "Intended Audience :: Developers",
149 "Intended Audience :: System Administrators",
150 "License :: OSI Approved :: Apache Software License",
151 "Programming Language :: Python :: 3 :: Only",
152 "Programming Language :: Python :: 3.7",
153 "Programming Language :: Python :: 3.8",
154 "Programming Language :: Python :: 3.9",
155 "Programming Language :: Python :: 3.10",
156 "Programming Language :: Python :: 3.11",
157 "Topic :: Security",
158 "Topic :: Software Development :: Build Tools",
159 "Typing :: Typed",
160 ],
161 )
162
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -123,8 +123,9 @@
url="https://github.com/bridgecrewio/checkov",
packages=find_packages(exclude=[
"dogfood_tests*",
+ "flake8_plugins*",
"integration_tests*",
- "performance_tests*"
+ "performance_tests*",
"tests*",
]),
include_package_data=True,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -123,8 +123,9 @@\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=find_packages(exclude=[\n \"dogfood_tests*\",\n+ \"flake8_plugins*\",\n \"integration_tests*\",\n- \"performance_tests*\"\n+ \"performance_tests*\",\n \"tests*\",\n ]),\n include_package_data=True,\n", "issue": "Tests folder published in version 2.3.201\nThe wheel of version 2.3.201 contains the tests folder and pollutes the site-packages even though setup.py excludes it explicitly.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport json\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\n\n\nclass PreBuildCommand(build_py):\n \"\"\"Pre-build command\"\"\"\n\n def transform_graph_yaml_to_json(self) -> None:\n \"\"\"Transforms YAML graph checks to JSON and copies them to build/lib\"\"\"\n\n import yaml # can't be top-level, because it needs to be first installed via 'setup_requires'\n\n graph_check_paths = (\n \"checkov/*/checks/graph_checks\",\n )\n build_path = Path(self.build_lib)\n src_path = Path()\n\n for graph_check_path in graph_check_paths:\n for yaml_file in src_path.glob(f\"{graph_check_path}/**/*.yaml\"):\n json_file = (build_path / yaml_file).with_suffix(\".json\")\n self.mkpath(str(json_file.parent))\n json_file.write_text(json.dumps(yaml.safe_load(yaml_file.read_text())))\n\n def run(self) -> None:\n self.execute(self.transform_graph_yaml_to_json, ())\n build_py.run(self)\n\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n cmdclass={\n 'build_py': PreBuildCommand,\n },\n setup_requires=[\n \"pyyaml\",\n ],\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.51\",\n \"bc-detect-secrets==1.4.21\",\n \"bc-jsonpath-ng==1.5.9\",\n \"deep-merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath==2.1.3\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"gitpython\",\n \"jmespath\",\n \"tqdm\",\n \"update-checker\",\n \"semantic-version\",\n \"packaging\",\n \"cloudsplaining>=0.4.3\",\n \"networkx<2.7\",\n \"igraph\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"importlib-metadata>=0.12\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0,<4.0.0\",\n \"packageurl-python\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonschema<5.0.0,>=4.6.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.9\",\n \"charset-normalizer\",\n \"pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'\",\n \"pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') 
and platform_machine == 'x86_64' and implementation_name == 'cpython'\",\n \"schema\",\n \"requests>=2.27.0\",\n \"yarl\",\n \"openai\",\n ],\n dependency_links=[], # keep it empty, needed for pipenv-setup\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=find_packages(exclude=[\n \"dogfood_tests*\",\n \"integration_tests*\",\n \"performance_tests*\"\n \"tests*\",\n ]),\n include_package_data=True,\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.ansible.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.arm.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.bicep.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.cloudformation.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.dockerfile.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.github_actions.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.kubernetes.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.terraform.checks.graph_checks\": [\"**/*.json\"],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}]}
| 2,324 | 103 |
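The root cause is visible in the `find_packages(exclude=[...])` call quoted above: the missing comma after `"performance_tests*"` makes Python concatenate the adjacent string literals, so the exclude list never actually contains `"tests*"` and the tests folder ends up in the wheel. A short demonstration:

```python
# Adjacent string literals are implicitly concatenated, so the buggy exclude
# list from the record's setup.py has only three entries, none of which
# matches the top-level "tests" package.
exclude = [
    "dogfood_tests*",
    "integration_tests*",
    "performance_tests*"   # <- missing comma in the original setup.py
    "tests*",
]
print(exclude)
# ['dogfood_tests*', 'integration_tests*', 'performance_tests*tests*']
```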
gh_patches_debug_7898
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-1889
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend the OTEL traces to track bytes billed and bytes processed
Extend OTEL traces to track bytes billed and bytes processed, allowing developers to see how much data and cost were consumed as part of the BigQueryJob trace.
</issue>
<code>
[start of google/cloud/bigquery/opentelemetry_tracing.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 from contextlib import contextmanager
17 from google.api_core.exceptions import GoogleAPICallError # type: ignore
18
19 logger = logging.getLogger(__name__)
20 try:
21 from opentelemetry import trace # type: ignore
22 from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore
23 from opentelemetry.trace.status import Status # type: ignore
24
25 HAS_OPENTELEMETRY = True
26 _warned_telemetry = True
27
28 except ImportError:
29 HAS_OPENTELEMETRY = False
30 _warned_telemetry = False
31
32 _default_attributes = {
33 "db.system": "BigQuery"
34 } # static, default values assigned to all spans
35
36
37 @contextmanager
38 def create_span(name, attributes=None, client=None, job_ref=None):
39 """Creates a ContextManager for a Span to be exported to the configured exporter.
40 If no configuration exists yields None.
41
42 Args:
43 name (str): Name that will be set for the span being created
44 attributes (Optional[dict]):
45 Additional attributes that pertain to
46 the specific API call (i.e. not a default attribute)
47 client (Optional[google.cloud.bigquery.client.Client]):
48 Pass in a Client object to extract any attributes that may be
49 relevant to it and add them to the created spans.
50 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
51 Pass in a _AsyncJob object to extract any attributes that may be
52 relevant to it and add them to the created spans.
53
54 Yields:
55 opentelemetry.trace.Span: Yields the newly created Span.
56
57 Raises:
58 google.api_core.exceptions.GoogleAPICallError:
59 Raised if a span could not be yielded or issue with call to
60 OpenTelemetry.
61 """
62 global _warned_telemetry
63 final_attributes = _get_final_span_attributes(attributes, client, job_ref)
64 if not HAS_OPENTELEMETRY:
65 if not _warned_telemetry:
66 logger.debug(
67 "This service is instrumented using OpenTelemetry. "
68 "OpenTelemetry or one of its components could not be imported; "
69 "please add compatible versions of opentelemetry-api and "
70 "opentelemetry-instrumentation packages in order to get BigQuery "
71 "Tracing data."
72 )
73 _warned_telemetry = True
74
75 yield None
76 return
77 tracer = trace.get_tracer(__name__)
78
79 # yield new span value
80 with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
81 try:
82 yield span
83 except GoogleAPICallError as error:
84 if error.code is not None:
85 span.set_status(Status(http_status_to_status_code(error.code)))
86 raise
87
88
89 def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
90 """Compiles attributes from: client, job_ref, user-provided attributes.
91
92 Attributes from all of these sources are merged together. Note the
93 attributes are added sequentially based on perceived order of precedence:
94 i.e. attributes added last may overwrite attributes added earlier.
95
96 Args:
97 attributes (Optional[dict]):
98 Additional attributes that pertain to
99 the specific API call (i.e. not a default attribute)
100
101 client (Optional[google.cloud.bigquery.client.Client]):
102 Pass in a Client object to extract any attributes that may be
103 relevant to it and add them to the final_attributes
104
105 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
106 Pass in a _AsyncJob object to extract any attributes that may be
107 relevant to it and add them to the final_attributes.
108
109 Returns: dict
110 """
111
112 collected_attributes = _default_attributes.copy()
113
114 if client:
115 collected_attributes.update(_set_client_attributes(client))
116 if job_ref:
117 collected_attributes.update(_set_job_attributes(job_ref))
118 if attributes:
119 collected_attributes.update(attributes)
120
121 final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}
122 return final_attributes
123
124
125 def _set_client_attributes(client):
126 return {"db.name": client.project, "location": client.location}
127
128
129 def _set_job_attributes(job_ref):
130 job_attributes = {
131 "db.name": job_ref.project,
132 "job_id": job_ref.job_id,
133 "state": job_ref.state,
134 }
135
136 job_attributes["hasErrors"] = job_ref.error_result is not None
137
138 if job_ref.created is not None:
139 job_attributes["timeCreated"] = job_ref.created.isoformat()
140
141 if job_ref.started is not None:
142 job_attributes["timeStarted"] = job_ref.started.isoformat()
143
144 if job_ref.ended is not None:
145 job_attributes["timeEnded"] = job_ref.ended.isoformat()
146
147 if job_ref.location is not None:
148 job_attributes["location"] = job_ref.location
149
150 if job_ref.parent_job_id is not None:
151 job_attributes["parent_job_id"] = job_ref.parent_job_id
152
153 if job_ref.num_child_jobs is not None:
154 job_attributes["num_child_jobs"] = job_ref.num_child_jobs
155
156 return job_attributes
157
[end of google/cloud/bigquery/opentelemetry_tracing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py
--- a/google/cloud/bigquery/opentelemetry_tracing.py
+++ b/google/cloud/bigquery/opentelemetry_tracing.py
@@ -153,4 +153,12 @@
if job_ref.num_child_jobs is not None:
job_attributes["num_child_jobs"] = job_ref.num_child_jobs
+ total_bytes_billed = getattr(job_ref, "total_bytes_billed", None)
+ if total_bytes_billed is not None:
+ job_attributes["total_bytes_billed"] = total_bytes_billed
+
+ total_bytes_processed = getattr(job_ref, "total_bytes_processed", None)
+ if total_bytes_processed is not None:
+ job_attributes["total_bytes_processed"] = total_bytes_processed
+
return job_attributes
|
{"golden_diff": "diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py\n--- a/google/cloud/bigquery/opentelemetry_tracing.py\n+++ b/google/cloud/bigquery/opentelemetry_tracing.py\n@@ -153,4 +153,12 @@\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n \n+ total_bytes_billed = getattr(job_ref, \"total_bytes_billed\", None)\n+ if total_bytes_billed is not None:\n+ job_attributes[\"total_bytes_billed\"] = total_bytes_billed\n+\n+ total_bytes_processed = getattr(job_ref, \"total_bytes_processed\", None)\n+ if total_bytes_processed is not None:\n+ job_attributes[\"total_bytes_processed\"] = total_bytes_processed\n+\n return job_attributes\n", "issue": "Extend the OTEL traces to track bytes billed and bytes processed\nExtend OTEL traces to track bytes billed and bytes processed. Allowing developers to see how much data and cost was consumed as part of BigQueryJob trace \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n \"\"\"Compiles attributes from: client, job_ref, user-provided attributes.\n\n Attributes from all of these sources are merged together. Note the\n attributes are added sequentially based on perceived order of precedence:\n i.e. attributes added last may overwrite attributes added earlier.\n\n Args:\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the final_attributes\n\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the final_attributes.\n\n Returns: dict\n \"\"\"\n\n collected_attributes = _default_attributes.copy()\n\n if client:\n collected_attributes.update(_set_client_attributes(client))\n if job_ref:\n collected_attributes.update(_set_job_attributes(job_ref))\n if attributes:\n collected_attributes.update(attributes)\n\n final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}]}
| 2,202 | 187 |
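The BigQuery patch above reads the two byte counters through getattr(job_ref, ..., None) rather than plain attribute access, presumably because only query jobs expose them while other _AsyncJob subclasses do not. A trimmed sketch of that defensive pattern (the helper name is hypothetical; the real change lives inside _set_job_attributes()):

```python
def _collect_byte_attributes(job_ref):
    # Only add the OTEL attributes when the job actually carries the values.
    attributes = {}

    total_bytes_billed = getattr(job_ref, "total_bytes_billed", None)
    if total_bytes_billed is not None:
        attributes["total_bytes_billed"] = total_bytes_billed

    total_bytes_processed = getattr(job_ref, "total_bytes_processed", None)
    if total_bytes_processed is not None:
        attributes["total_bytes_processed"] = total_bytes_processed

    return attributes
```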
gh_patches_debug_2307
|
rasdani/github-patches
|
git_diff
|
huggingface__text-generation-inference-1089
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flash attention (v1) is broken
Defaulting window_size_left to "-1" breaks flash attention v1 since v1 only works if window_size_left is 0 (line 83).
https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/utils/flash_attn.py#L60C7-L60C7
</issue>
<code>
[start of server/text_generation_server/utils/flash_attn.py]
1 import os
2 import torch
3
4 from loguru import logger
5
6 if os.getenv("USE_FLASH_ATTENTION", "").lower() == "false":
7 raise ImportError("`USE_FLASH_ATTENTION` is false.")
8
9 if not torch.cuda.is_available():
10 raise ImportError("CUDA is not available")
11
12 major, minor = torch.cuda.get_device_capability()
13 is_sm75 = major == 7 and minor == 5
14 is_sm8x = major == 8 and minor >= 0
15 is_sm90 = major == 9 and minor == 0
16
17 HAS_FLASH_ATTN = False
18 HAS_FLASH_ATTN_V2 = False
19 try:
20 try:
21 import flash_attn_2_cuda
22 except ImportError:
23 raise ImportError(
24 "Flash Attention V2 is not installed.\n"
25 "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
26 "or install flash attention v2 with `cd server && make install install-flash-attention-v2`"
27 )
28 if not (is_sm8x or is_sm90):
29 raise ImportError(
30 f"GPU with CUDA capability {major} {minor} is not supported for "
31 "Flash Attention V2"
32 )
33 HAS_FLASH_ATTN_V2 = True
34 except ImportError as e:
35 try:
36 import flash_attn_cuda
37 except ImportError:
38 raise ImportError(
39 "Flash Attention is not installed.\n"
40 "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
41 "or install flash attention with `cd server && make install install-flash-attention`"
42 ) from e
43
44 if not (is_sm75 or is_sm8x or is_sm90):
45 raise ImportError(
46 f"GPU with CUDA capability {major} {minor} is not supported"
47 ) from e
48 logger.warning(f"Unable to use Flash Attention V2: {e}")
49 HAS_FLASH_ATTN = True
50
51
52 def attention(
53 q,
54 k,
55 v,
56 out,
57 cu_seqlens,
58 max_s,
59 softmax_scale,
60 window_size_left=-1,
61 ):
62 if HAS_FLASH_ATTN_V2:
63 return flash_attn_2_cuda.varlen_fwd(
64 q,
65 k,
66 v,
67 out,
68 cu_seqlens,
69 cu_seqlens,
70 max_s,
71 max_s,
72 0.0,
73 softmax_scale,
74 False,
75 True,
76 window_size_left,
77 0,
78 False,
79 None,
80 )
81
82 if HAS_FLASH_ATTN:
83 if window_size_left != 0:
84 raise NotImplementedError(
85 "window_size_left is only available with flash attn v2"
86 )
87
88 # Flash attention v1 requires q, k and v to have the same number of heads
89 if k.shape[1] != q.shape[1]:
90 # MQA expand
91 if k.shape[1] == 1:
92 k = k.expand(-1, q.shape[1], -1)
93 # Grouped attention reshape
94 else:
95 original_shape = k.shape
96 k = (
97 k.unsqueeze(2)
98 .expand(-1, -1, q.shape[1] // k.shape[1], -1)
99 .reshape(original_shape[0], -1, original_shape[2])
100 )
101 if v.shape[1] != q.shape[1]:
102 # MQA expand
103 if v.shape[1] == 1:
104 v = v.expand(-1, q.shape[1], -1)
105 # Grouped attention reshape
106 else:
107 original_shape = v.shape
108 v = (
109 v.unsqueeze(2)
110 .expand(-1, -1, q.shape[1] // v.shape[1], -1)
111 .reshape(original_shape[0], -1, original_shape[2])
112 )
113
114 return flash_attn_cuda.fwd(
115 q,
116 k,
117 v,
118 out,
119 cu_seqlens,
120 cu_seqlens,
121 max_s,
122 max_s,
123 0.0,
124 softmax_scale,
125 False,
126 True,
127 False,
128 0,
129 None,
130 )
131
132 raise NotImplementedError("flash attention is not installed")
133
[end of server/text_generation_server/utils/flash_attn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -80,7 +80,7 @@
)
if HAS_FLASH_ATTN:
- if window_size_left != 0:
+ if window_size_left != -1:
raise NotImplementedError(
"window_size_left is only available with flash attn v2"
)
|
{"golden_diff": "diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py\n--- a/server/text_generation_server/utils/flash_attn.py\n+++ b/server/text_generation_server/utils/flash_attn.py\n@@ -80,7 +80,7 @@\n )\n \n if HAS_FLASH_ATTN:\n- if window_size_left != 0:\n+ if window_size_left != -1:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n", "issue": "Flash attention (v1) is broken\nDefaulting window_size_left to \"-1\" breaks flash attention v1 since v1 only works if window_size_left is 0 (line 83).\r\n\r\nhttps://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/utils/flash_attn.py#L60C7-L60C7\n", "before_files": [{"content": "import os\nimport torch\n\nfrom loguru import logger\n\nif os.getenv(\"USE_FLASH_ATTENTION\", \"\").lower() == \"false\":\n raise ImportError(\"`USE_FLASH_ATTENTION` is false.\")\n\nif not torch.cuda.is_available():\n raise ImportError(\"CUDA is not available\")\n\nmajor, minor = torch.cuda.get_device_capability()\nis_sm75 = major == 7 and minor == 5\nis_sm8x = major == 8 and minor >= 0\nis_sm90 = major == 9 and minor == 0\n\nHAS_FLASH_ATTN = False\nHAS_FLASH_ATTN_V2 = False\ntry:\n try:\n import flash_attn_2_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention V2 is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention v2 with `cd server && make install install-flash-attention-v2`\"\n )\n if not (is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported for \"\n \"Flash Attention V2\"\n )\n HAS_FLASH_ATTN_V2 = True\nexcept ImportError as e:\n try:\n import flash_attn_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention with `cd server && make install install-flash-attention`\"\n ) from e\n\n if not (is_sm75 or is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported\"\n ) from e\n logger.warning(f\"Unable to use Flash Attention V2: {e}\")\n HAS_FLASH_ATTN = True\n\n\ndef attention(\n q,\n k,\n v,\n out,\n cu_seqlens,\n max_s,\n softmax_scale,\n window_size_left=-1,\n):\n if HAS_FLASH_ATTN_V2:\n return flash_attn_2_cuda.varlen_fwd(\n q,\n k,\n v,\n out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n window_size_left,\n 0,\n False,\n None,\n )\n\n if HAS_FLASH_ATTN:\n if window_size_left != 0:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n\n # Flash attention v1 requires q, k and v to have the same number of heads\n if k.shape[1] != q.shape[1]:\n # MQA expand\n if k.shape[1] == 1:\n k = k.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = k.shape\n k = (\n k.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // k.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n if v.shape[1] != q.shape[1]:\n # MQA expand\n if v.shape[1] == 1:\n v = v.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = v.shape\n v = (\n v.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // v.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n\n return flash_attn_cuda.fwd(\n q,\n k,\n v,\n 
out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n False,\n 0,\n None,\n )\n\n raise NotImplementedError(\"flash attention is not installed\")\n", "path": "server/text_generation_server/utils/flash_attn.py"}]}
| 1,858 | 114 |
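The text-generation-inference fix is a one-token change: -1 is the sentinel for "no sliding window", so the v1 code path must reject every value except -1 instead of requiring 0, which nothing passes once the default is -1. A condensed view of the corrected guard, assuming the rest of attention() stays as listed above:

```python
if HAS_FLASH_ATTN:
    # Flash attention v1 has no sliding-window support; only the default
    # window_size_left == -1 ("disabled") is acceptable here.
    if window_size_left != -1:
        raise NotImplementedError(
            "window_size_left is only available with flash attn v2"
        )
```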
gh_patches_debug_23290
|
rasdani/github-patches
|
git_diff
|
microsoft__Qcodes-6107
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ignored exception on CI in Windows related to monitor and socket.
This may be related to the intermittent failures of the monitor tests on Windows.
Observing the following traceback:
```
Exception ignored in: <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>
Traceback (most recent call last):
File "C:\hostedtoolcache\windows\Python\3.9.13\x64\lib\threading.py", line 917, in run
self._target(*self._args, **self._kwargs)
ResourceWarning: unclosed <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>
```
</issue>
<code>
[start of src/qcodes/monitor/monitor.py]
1 #! /usr/bin/env python
2 # vim:fenc=utf-8
3 #
4 # Copyright © 2017 unga <[email protected]>
5 #
6 # Distributed under terms of the MIT license.
7 """
8 Monitor a set of parameters in a background thread
9 stream output over websocket
10
11 To start monitor, run this file, or if qcodes is installed as a module:
12
13 ``% python -m qcodes.monitor.monitor``
14
15 Add parameters to monitor in your measurement by creating a new monitor with a
16 list of parameters to monitor:
17
18 ``monitor = qcodes.Monitor(param1, param2, param3, ...)``
19 """
20 from __future__ import annotations
21
22 import asyncio
23 import json
24 import logging
25 import os
26 import socketserver
27 import time
28 import webbrowser
29 from asyncio import CancelledError
30 from collections import defaultdict
31 from contextlib import suppress
32 from importlib.resources import as_file, files
33 from threading import Event, Thread
34 from typing import TYPE_CHECKING, Any, Callable
35
36 import websockets
37 import websockets.exceptions
38 import websockets.server
39
40 from qcodes.parameters import Parameter
41
42 if TYPE_CHECKING:
43 from collections.abc import Awaitable, Sequence
44
45 WEBSOCKET_PORT = 5678
46 SERVER_PORT = 3000
47
48 log = logging.getLogger(__name__)
49
50
51 def _get_metadata(
52 *parameters: Parameter, use_root_instrument: bool = True
53 ) -> dict[str, Any]:
54 """
55 Return a dictionary that contains the parameter metadata grouped by the
56 instrument it belongs to.
57 """
58 metadata_timestamp = time.time()
59 # group metadata by instrument
60 metas: dict[Any, Any] = defaultdict(list)
61 for parameter in parameters:
62 # Get the latest value from the parameter,
63 # respecting the max_val_age parameter
64 meta: dict[str, float | str | None] = {}
65 meta["value"] = str(parameter.get_latest())
66 timestamp = parameter.get_latest.get_timestamp()
67 if timestamp is not None:
68 meta["ts"] = timestamp.timestamp()
69 else:
70 meta["ts"] = None
71 meta["name"] = parameter.label or parameter.name
72 meta["unit"] = parameter.unit
73
74 # find the base instrument that this parameter belongs to
75 if use_root_instrument:
76 baseinst = parameter.root_instrument
77 else:
78 baseinst = parameter.instrument
79 if baseinst is None:
80 metas["Unbound Parameter"].append(meta)
81 else:
82 metas[str(baseinst)].append(meta)
83
84 # Create list of parameters, grouped by instrument
85 parameters_out = []
86 for instrument in metas:
87 temp = {"instrument": instrument, "parameters": metas[instrument]}
88 parameters_out.append(temp)
89
90 state = {"ts": metadata_timestamp, "parameters": parameters_out}
91 return state
92
93
94 def _handler(
95 parameters: Sequence[Parameter], interval: float, use_root_instrument: bool = True
96 ) -> Callable[[websockets.server.WebSocketServerProtocol, str], Awaitable[None]]:
97 """
98 Return the websockets server handler.
99 """
100
101 async def server_func(
102 websocket: websockets.server.WebSocketServerProtocol, _: str
103 ) -> None:
104 """
105 Create a websockets handler that sends parameter values to a listener
106 every "interval" seconds.
107 """
108 while True:
109 try:
110 # Update the parameter values
111 try:
112 meta = _get_metadata(
113 *parameters, use_root_instrument=use_root_instrument
114 )
115 except ValueError:
116 log.exception("Error getting parameters")
117 break
118 log.debug("sending.. to %r", websocket)
119 await websocket.send(json.dumps(meta))
120 # Wait for interval seconds and then send again
121 await asyncio.sleep(interval)
122 except (CancelledError, websockets.exceptions.ConnectionClosed):
123 log.debug("Got CancelledError or ConnectionClosed",
124 exc_info=True)
125 break
126 log.debug("Closing websockets connection")
127
128 return server_func
129
130
131 class Monitor(Thread):
132 """
133 QCodes Monitor - WebSockets server to monitor qcodes parameters.
134 """
135
136 running: Monitor | None = None
137
138 def __init__(
139 self,
140 *parameters: Parameter,
141 interval: float = 1,
142 use_root_instrument: bool = True,
143 ):
144 """
145 Monitor qcodes parameters.
146
147 Args:
148 *parameters: Parameters to monitor.
149 interval: How often one wants to refresh the values.
150 use_root_instrument: Defines if parameters are grouped according to
151 parameter.root_instrument or parameter.instrument
152 """
153 super().__init__(daemon=True)
154
155 # Check that all values are valid parameters
156 for parameter in parameters:
157 if not isinstance(parameter, Parameter):
158 raise TypeError(f"We can only monitor QCodes "
159 f"Parameters, not {type(parameter)}")
160
161 self.loop: asyncio.AbstractEventLoop | None = None
162 self._stop_loop_future: asyncio.Future | None = None
163 self._parameters = parameters
164 self.loop_is_closed = Event()
165 self.server_is_started = Event()
166 self.handler = _handler(
167 parameters, interval=interval, use_root_instrument=use_root_instrument
168 )
169 log.debug("Start monitoring thread")
170 if Monitor.running:
171 # stop the old server
172 log.debug("Stopping and restarting server")
173 Monitor.running.stop()
174 self.start()
175
176 # Wait until the loop is running
177 self.server_is_started.wait(timeout=5)
178 if not self.server_is_started.is_set():
179 raise RuntimeError("Failed to start server")
180 Monitor.running = self
181
182 def run(self) -> None:
183 """
184 Start the event loop and run forever.
185 """
186 log.debug("Running Websocket server")
187
188 async def run_loop() -> None:
189 self.loop = asyncio.get_running_loop()
190 self._stop_loop_future = self.loop.create_future()
191
192 async with websockets.server.serve(
193 self.handler, "127.0.0.1", WEBSOCKET_PORT, close_timeout=1
194 ):
195 self.server_is_started.set()
196 try:
197 await self._stop_loop_future
198 except asyncio.CancelledError:
199 log.debug("Websocket server thread shutting down")
200
201 try:
202 asyncio.run(run_loop())
203 finally:
204 self.loop_is_closed.set()
205
206 def update_all(self) -> None:
207 """
208 Update all parameters in the monitor.
209 """
210 for parameter in self._parameters:
211 # call get if it can be called without arguments
212 with suppress(TypeError):
213 parameter.get()
214
215 def stop(self) -> None:
216 """
217 Shutdown the server, close the event loop and join the thread.
218 Setting active Monitor to ``None``.
219 """
220 self.join()
221 Monitor.running = None
222
223 def join(self, timeout: float | None = None) -> None:
224 """
225 Overwrite ``Thread.join`` to make sure server is stopped before
226 joining avoiding a potential deadlock.
227 """
228 log.debug("Shutting down server")
229 if not self.is_alive():
230 # we run this check before trying to run to prevent a cryptic
231 # error message
232 log.debug("monitor is dead")
233 return
234 try:
235 if self.loop is not None and self._stop_loop_future is not None:
236 log.debug("Instructing server to stop event loop.")
237 self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)
238 else:
239 log.debug("No event loop found. Cannot stop event loop.")
240 except RuntimeError:
241 # the above may throw a runtime error if the loop is already
242 # stopped in which case there is nothing more to do
243 log.exception("Could not close loop")
244 self.loop_is_closed.wait(timeout=5)
245 if not self.loop_is_closed.is_set():
246 raise RuntimeError("Failed to join loop")
247 log.debug("Loop reported closed")
248 super().join(timeout=timeout)
249 log.debug("Monitor Thread has joined")
250
251 @staticmethod
252 def show() -> None:
253 """
254 Overwrite this method to show/raise your monitor GUI
255 F.ex.
256
257 ::
258
259 import webbrowser
260 url = "localhost:3000"
261 # Open URL in new window, raising the window if possible.
262 webbrowser.open_new(url)
263
264 """
265 webbrowser.open(f"http://localhost:{SERVER_PORT}")
266
267
268 def main() -> None:
269 import http.server
270
271 # If this file is run, create a simple webserver that serves a simple
272 # website that can be used to view monitored parameters.
273 # # https://github.com/python/mypy/issues/4182
274 parent_module = ".".join(__loader__.name.split(".")[:-1]) # type: ignore[name-defined]
275
276 static_dir = files(parent_module).joinpath("dist")
277 try:
278 with as_file(static_dir) as extracted_dir:
279 os.chdir(extracted_dir)
280 log.info("Starting HTTP Server at http://localhost:%i", SERVER_PORT)
281 with socketserver.TCPServer(
282 ("", SERVER_PORT), http.server.SimpleHTTPRequestHandler
283 ) as httpd:
284 log.debug("serving directory %s", static_dir)
285 webbrowser.open(f"http://localhost:{SERVER_PORT}")
286 httpd.serve_forever()
287 except KeyboardInterrupt:
288 log.info("Shutting Down HTTP Server")
289
290
291 if __name__ == "__main__":
292 main()
293
[end of src/qcodes/monitor/monitor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/qcodes/monitor/monitor.py b/src/qcodes/monitor/monitor.py
--- a/src/qcodes/monitor/monitor.py
+++ b/src/qcodes/monitor/monitor.py
@@ -193,10 +193,8 @@
self.handler, "127.0.0.1", WEBSOCKET_PORT, close_timeout=1
):
self.server_is_started.set()
- try:
- await self._stop_loop_future
- except asyncio.CancelledError:
- log.debug("Websocket server thread shutting down")
+ await self._stop_loop_future
+ log.debug("Websocket server thread shutting down")
try:
asyncio.run(run_loop())
@@ -234,7 +232,7 @@
try:
if self.loop is not None and self._stop_loop_future is not None:
log.debug("Instructing server to stop event loop.")
- self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)
+ self.loop.call_soon_threadsafe(self._stop_loop_future.set_result, True)
else:
log.debug("No event loop found. Cannot stop event loop.")
except RuntimeError:
|
{"golden_diff": "diff --git a/src/qcodes/monitor/monitor.py b/src/qcodes/monitor/monitor.py\n--- a/src/qcodes/monitor/monitor.py\n+++ b/src/qcodes/monitor/monitor.py\n@@ -193,10 +193,8 @@\n self.handler, \"127.0.0.1\", WEBSOCKET_PORT, close_timeout=1\n ):\n self.server_is_started.set()\n- try:\n- await self._stop_loop_future\n- except asyncio.CancelledError:\n- log.debug(\"Websocket server thread shutting down\")\n+ await self._stop_loop_future\n+ log.debug(\"Websocket server thread shutting down\")\n \n try:\n asyncio.run(run_loop())\n@@ -234,7 +232,7 @@\n try:\n if self.loop is not None and self._stop_loop_future is not None:\n log.debug(\"Instructing server to stop event loop.\")\n- self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)\n+ self.loop.call_soon_threadsafe(self._stop_loop_future.set_result, True)\n else:\n log.debug(\"No event loop found. Cannot stop event loop.\")\n except RuntimeError:\n", "issue": "Ignored exception on ci in windows related to monitor and socket. \nThis may be related to the intermitted failures of monitor tests on windows\r\n\r\nObserving the following traceback\r\n```\r\nException ignored in: <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>\r\nTraceback (most recent call last):\r\n File \"C:\\hostedtoolcache\\windows\\Python\\3.9.13\\x64\\lib\\threading.py\", line 917, in run\r\n self._target(*self._args, **self._kwargs)\r\nResourceWarning: unclosed <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>\r\n```\n", "before_files": [{"content": "#! /usr/bin/env python\n# vim:fenc=utf-8\n#\n# Copyright \u00a9 2017 unga <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\"\"\"\nMonitor a set of parameters in a background thread\nstream output over websocket\n\nTo start monitor, run this file, or if qcodes is installed as a module:\n\n``% python -m qcodes.monitor.monitor``\n\nAdd parameters to monitor in your measurement by creating a new monitor with a\nlist of parameters to monitor:\n\n``monitor = qcodes.Monitor(param1, param2, param3, ...)``\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport logging\nimport os\nimport socketserver\nimport time\nimport webbrowser\nfrom asyncio import CancelledError\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom importlib.resources import as_file, files\nfrom threading import Event, Thread\nfrom typing import TYPE_CHECKING, Any, Callable\n\nimport websockets\nimport websockets.exceptions\nimport websockets.server\n\nfrom qcodes.parameters import Parameter\n\nif TYPE_CHECKING:\n from collections.abc import Awaitable, Sequence\n\nWEBSOCKET_PORT = 5678\nSERVER_PORT = 3000\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_metadata(\n *parameters: Parameter, use_root_instrument: bool = True\n) -> dict[str, Any]:\n \"\"\"\n Return a dictionary that contains the parameter metadata grouped by the\n instrument it belongs to.\n \"\"\"\n metadata_timestamp = time.time()\n # group metadata by instrument\n metas: dict[Any, Any] = defaultdict(list)\n for parameter in parameters:\n # Get the latest value from the parameter,\n # respecting the max_val_age parameter\n meta: dict[str, float | str | None] = {}\n meta[\"value\"] = str(parameter.get_latest())\n timestamp = parameter.get_latest.get_timestamp()\n if timestamp is not None:\n meta[\"ts\"] = 
timestamp.timestamp()\n else:\n meta[\"ts\"] = None\n meta[\"name\"] = parameter.label or parameter.name\n meta[\"unit\"] = parameter.unit\n\n # find the base instrument that this parameter belongs to\n if use_root_instrument:\n baseinst = parameter.root_instrument\n else:\n baseinst = parameter.instrument\n if baseinst is None:\n metas[\"Unbound Parameter\"].append(meta)\n else:\n metas[str(baseinst)].append(meta)\n\n # Create list of parameters, grouped by instrument\n parameters_out = []\n for instrument in metas:\n temp = {\"instrument\": instrument, \"parameters\": metas[instrument]}\n parameters_out.append(temp)\n\n state = {\"ts\": metadata_timestamp, \"parameters\": parameters_out}\n return state\n\n\ndef _handler(\n parameters: Sequence[Parameter], interval: float, use_root_instrument: bool = True\n) -> Callable[[websockets.server.WebSocketServerProtocol, str], Awaitable[None]]:\n \"\"\"\n Return the websockets server handler.\n \"\"\"\n\n async def server_func(\n websocket: websockets.server.WebSocketServerProtocol, _: str\n ) -> None:\n \"\"\"\n Create a websockets handler that sends parameter values to a listener\n every \"interval\" seconds.\n \"\"\"\n while True:\n try:\n # Update the parameter values\n try:\n meta = _get_metadata(\n *parameters, use_root_instrument=use_root_instrument\n )\n except ValueError:\n log.exception(\"Error getting parameters\")\n break\n log.debug(\"sending.. to %r\", websocket)\n await websocket.send(json.dumps(meta))\n # Wait for interval seconds and then send again\n await asyncio.sleep(interval)\n except (CancelledError, websockets.exceptions.ConnectionClosed):\n log.debug(\"Got CancelledError or ConnectionClosed\",\n exc_info=True)\n break\n log.debug(\"Closing websockets connection\")\n\n return server_func\n\n\nclass Monitor(Thread):\n \"\"\"\n QCodes Monitor - WebSockets server to monitor qcodes parameters.\n \"\"\"\n\n running: Monitor | None = None\n\n def __init__(\n self,\n *parameters: Parameter,\n interval: float = 1,\n use_root_instrument: bool = True,\n ):\n \"\"\"\n Monitor qcodes parameters.\n\n Args:\n *parameters: Parameters to monitor.\n interval: How often one wants to refresh the values.\n use_root_instrument: Defines if parameters are grouped according to\n parameter.root_instrument or parameter.instrument\n \"\"\"\n super().__init__(daemon=True)\n\n # Check that all values are valid parameters\n for parameter in parameters:\n if not isinstance(parameter, Parameter):\n raise TypeError(f\"We can only monitor QCodes \"\n f\"Parameters, not {type(parameter)}\")\n\n self.loop: asyncio.AbstractEventLoop | None = None\n self._stop_loop_future: asyncio.Future | None = None\n self._parameters = parameters\n self.loop_is_closed = Event()\n self.server_is_started = Event()\n self.handler = _handler(\n parameters, interval=interval, use_root_instrument=use_root_instrument\n )\n log.debug(\"Start monitoring thread\")\n if Monitor.running:\n # stop the old server\n log.debug(\"Stopping and restarting server\")\n Monitor.running.stop()\n self.start()\n\n # Wait until the loop is running\n self.server_is_started.wait(timeout=5)\n if not self.server_is_started.is_set():\n raise RuntimeError(\"Failed to start server\")\n Monitor.running = self\n\n def run(self) -> None:\n \"\"\"\n Start the event loop and run forever.\n \"\"\"\n log.debug(\"Running Websocket server\")\n\n async def run_loop() -> None:\n self.loop = asyncio.get_running_loop()\n self._stop_loop_future = self.loop.create_future()\n\n async with websockets.server.serve(\n 
self.handler, \"127.0.0.1\", WEBSOCKET_PORT, close_timeout=1\n ):\n self.server_is_started.set()\n try:\n await self._stop_loop_future\n except asyncio.CancelledError:\n log.debug(\"Websocket server thread shutting down\")\n\n try:\n asyncio.run(run_loop())\n finally:\n self.loop_is_closed.set()\n\n def update_all(self) -> None:\n \"\"\"\n Update all parameters in the monitor.\n \"\"\"\n for parameter in self._parameters:\n # call get if it can be called without arguments\n with suppress(TypeError):\n parameter.get()\n\n def stop(self) -> None:\n \"\"\"\n Shutdown the server, close the event loop and join the thread.\n Setting active Monitor to ``None``.\n \"\"\"\n self.join()\n Monitor.running = None\n\n def join(self, timeout: float | None = None) -> None:\n \"\"\"\n Overwrite ``Thread.join`` to make sure server is stopped before\n joining avoiding a potential deadlock.\n \"\"\"\n log.debug(\"Shutting down server\")\n if not self.is_alive():\n # we run this check before trying to run to prevent a cryptic\n # error message\n log.debug(\"monitor is dead\")\n return\n try:\n if self.loop is not None and self._stop_loop_future is not None:\n log.debug(\"Instructing server to stop event loop.\")\n self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)\n else:\n log.debug(\"No event loop found. Cannot stop event loop.\")\n except RuntimeError:\n # the above may throw a runtime error if the loop is already\n # stopped in which case there is nothing more to do\n log.exception(\"Could not close loop\")\n self.loop_is_closed.wait(timeout=5)\n if not self.loop_is_closed.is_set():\n raise RuntimeError(\"Failed to join loop\")\n log.debug(\"Loop reported closed\")\n super().join(timeout=timeout)\n log.debug(\"Monitor Thread has joined\")\n\n @staticmethod\n def show() -> None:\n \"\"\"\n Overwrite this method to show/raise your monitor GUI\n F.ex.\n\n ::\n\n import webbrowser\n url = \"localhost:3000\"\n # Open URL in new window, raising the window if possible.\n webbrowser.open_new(url)\n\n \"\"\"\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n\n\ndef main() -> None:\n import http.server\n\n # If this file is run, create a simple webserver that serves a simple\n # website that can be used to view monitored parameters.\n # # https://github.com/python/mypy/issues/4182\n parent_module = \".\".join(__loader__.name.split(\".\")[:-1]) # type: ignore[name-defined]\n\n static_dir = files(parent_module).joinpath(\"dist\")\n try:\n with as_file(static_dir) as extracted_dir:\n os.chdir(extracted_dir)\n log.info(\"Starting HTTP Server at http://localhost:%i\", SERVER_PORT)\n with socketserver.TCPServer(\n (\"\", SERVER_PORT), http.server.SimpleHTTPRequestHandler\n ) as httpd:\n log.debug(\"serving directory %s\", static_dir)\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n httpd.serve_forever()\n except KeyboardInterrupt:\n log.info(\"Shutting Down HTTP Server\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/qcodes/monitor/monitor.py"}]}
| 3,567 | 260 |
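The Qcodes patch stops cancelling the stop future and resolves it instead, so the coroutine falls out of the websockets.server.serve() context manager normally and the server sockets are closed before the monitor thread exits, which plausibly explains the ResourceWarning about an unclosed socket on Windows CI. A compressed sketch of that shutdown handshake (method boundaries here are illustrative, not the real class layout):

```python
async def run_loop(self) -> None:
    async with websockets.server.serve(self.handler, "127.0.0.1", WEBSOCKET_PORT):
        self.server_is_started.set()
        # Completes (rather than raising CancelledError) when stop is requested,
        # letting serve() clean up its sockets on the way out.
        await self._stop_loop_future
        log.debug("Websocket server thread shutting down")


def request_stop(self) -> None:
    # Hypothetical helper; the real code issues this call inside join().
    self.loop.call_soon_threadsafe(self._stop_loop_future.set_result, True)
```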
gh_patches_debug_36388
|
rasdani/github-patches
|
git_diff
|
apache__airflow-18772
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove duplicate line of code for configuration copy
This is a code-improvement PR to remove a duplicate line. The code copies the config in both branches of the IF statement, hence the duplicate call.
Moving that call above the IF makes a single copy sufficient for the purpose.
I have moved the first call above the IF and removed the ELSE block.
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
</issue>
<code>
[start of airflow/task/task_runner/base_task_runner.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """Base task runner"""
19 import os
20 import subprocess
21 import threading
22 from pwd import getpwnam
23 from tempfile import NamedTemporaryFile
24 from typing import Optional, Union
25
26 from airflow.configuration import conf
27 from airflow.exceptions import AirflowConfigException
28 from airflow.models.taskinstance import load_error_file
29 from airflow.utils.configuration import tmp_configuration_copy
30 from airflow.utils.log.logging_mixin import LoggingMixin
31 from airflow.utils.net import get_hostname
32 from airflow.utils.platform import getuser
33
34 PYTHONPATH_VAR = 'PYTHONPATH'
35
36
37 class BaseTaskRunner(LoggingMixin):
38 """
39 Runs Airflow task instances by invoking the `airflow tasks run` command with raw
40 mode enabled in a subprocess.
41
42 :param local_task_job: The local task job associated with running the
43 associated task instance.
44 :type local_task_job: airflow.jobs.local_task_job.LocalTaskJob
45 """
46
47 def __init__(self, local_task_job):
48 # Pass task instance context into log handlers to setup the logger.
49 super().__init__(local_task_job.task_instance)
50 self._task_instance = local_task_job.task_instance
51
52 popen_prepend = []
53 if self._task_instance.run_as_user:
54 self.run_as_user = self._task_instance.run_as_user
55 else:
56 try:
57 self.run_as_user = conf.get('core', 'default_impersonation')
58 except AirflowConfigException:
59 self.run_as_user = None
60
61 # Add sudo commands to change user if we need to. Needed to handle SubDagOperator
62 # case using a SequentialExecutor.
63 self.log.debug("Planning to run as the %s user", self.run_as_user)
64 if self.run_as_user and (self.run_as_user != getuser()):
65 # We want to include any environment variables now, as we won't
66 # want to have to specify them in the sudo call - they would show
67 # up in `ps` that way! And run commands now, as the other user
68 # might not be able to run the cmds to get credentials
69 cfg_path = tmp_configuration_copy(chmod=0o600)
70
71 # Give ownership of file to user; only they can read and write
72 subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)
73
74 # propagate PYTHONPATH environment variable
75 pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')
76 popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]
77
78 if pythonpath_value:
79 popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')
80
81 else:
82 # Always provide a copy of the configuration file settings. Since
83 # we are running as the same user, and can pass through environment
84 # variables then we don't need to include those in the config copy
85 # - the runner can read/execute those values as it needs
86 cfg_path = tmp_configuration_copy(chmod=0o600)
87
88 self._error_file = NamedTemporaryFile(delete=True)
89 if self.run_as_user:
90 try:
91 os.chown(self._error_file.name, getpwnam(self.run_as_user).pw_uid, -1)
92 except KeyError:
93 # No user `run_as_user` found
94 pass
95
96 self._cfg_path = cfg_path
97 self._command = (
98 popen_prepend
99 + self._task_instance.command_as_list(
100 raw=True,
101 pickle_id=local_task_job.pickle_id,
102 mark_success=local_task_job.mark_success,
103 job_id=local_task_job.id,
104 pool=local_task_job.pool,
105 cfg_path=cfg_path,
106 )
107 + ["--error-file", self._error_file.name]
108 )
109 self.process = None
110
111 def deserialize_run_error(self) -> Optional[Union[str, Exception]]:
112 """Return task runtime error if its written to provided error file."""
113 return load_error_file(self._error_file)
114
115 def _read_task_logs(self, stream):
116 while True:
117 line = stream.readline()
118 if isinstance(line, bytes):
119 line = line.decode('utf-8')
120 if not line:
121 break
122 self.log.info(
123 'Job %s: Subtask %s %s',
124 self._task_instance.job_id,
125 self._task_instance.task_id,
126 line.rstrip('\n'),
127 )
128
129 def run_command(self, run_with=None):
130 """
131 Run the task command.
132
133 :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
134 :type run_with: list
135 :return: the process that was run
136 :rtype: subprocess.Popen
137 """
138 run_with = run_with or []
139 full_cmd = run_with + self._command
140
141 self.log.info("Running on host: %s", get_hostname())
142 self.log.info('Running: %s', full_cmd)
143
144 proc = subprocess.Popen(
145 full_cmd,
146 stdout=subprocess.PIPE,
147 stderr=subprocess.STDOUT,
148 universal_newlines=True,
149 close_fds=True,
150 env=os.environ.copy(),
151 preexec_fn=os.setsid,
152 )
153
154 # Start daemon thread to read subprocess logging output
155 log_reader = threading.Thread(
156 target=self._read_task_logs,
157 args=(proc.stdout,),
158 )
159 log_reader.daemon = True
160 log_reader.start()
161 return proc
162
163 def start(self):
164 """Start running the task instance in a subprocess."""
165 raise NotImplementedError()
166
167 def return_code(self) -> Optional[int]:
168 """
169 :return: The return code associated with running the task instance or
170 None if the task is not yet done.
171 :rtype: int
172 """
173 raise NotImplementedError()
174
175 def terminate(self) -> None:
176 """Force kill the running task instance."""
177 raise NotImplementedError()
178
179 def on_finish(self) -> None:
180 """A callback that should be called when this is done running."""
181 if self._cfg_path and os.path.isfile(self._cfg_path):
182 if self.run_as_user:
183 subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
184 else:
185 os.remove(self._cfg_path)
186 try:
187 self._error_file.close()
188 except FileNotFoundError:
189 # The subprocess has deleted this file before we do
190 # so we ignore
191 pass
192
[end of airflow/task/task_runner/base_task_runner.py]
[start of airflow/utils/configuration.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 import json
20 import os
21 from tempfile import mkstemp
22
23 from airflow.configuration import conf
24
25
26 def tmp_configuration_copy(chmod=0o600):
27 """
28 Returns a path for a temporary file including a full copy of the configuration
29 settings.
30 :return: a path to a temporary file
31 """
32 cfg_dict = conf.as_dict(display_sensitive=True, raw=True)
33 temp_fd, cfg_path = mkstemp()
34
35 with os.fdopen(temp_fd, 'w') as temp_file:
36 # Set the permissions before we write anything to it.
37 if chmod is not None:
38 os.fchmod(temp_fd, chmod)
39 json.dump(cfg_dict, temp_file)
40
41 return cfg_path
42
[end of airflow/utils/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/airflow/task/task_runner/base_task_runner.py b/airflow/task/task_runner/base_task_runner.py
--- a/airflow/task/task_runner/base_task_runner.py
+++ b/airflow/task/task_runner/base_task_runner.py
@@ -66,7 +66,7 @@
# want to have to specify them in the sudo call - they would show
# up in `ps` that way! And run commands now, as the other user
# might not be able to run the cmds to get credentials
- cfg_path = tmp_configuration_copy(chmod=0o600)
+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True)
# Give ownership of file to user; only they can read and write
subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)
@@ -83,7 +83,7 @@
# we are running as the same user, and can pass through environment
# variables then we don't need to include those in the config copy
# - the runner can read/execute those values as it needs
- cfg_path = tmp_configuration_copy(chmod=0o600)
+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=False, include_cmds=False)
self._error_file = NamedTemporaryFile(delete=True)
if self.run_as_user:
diff --git a/airflow/utils/configuration.py b/airflow/utils/configuration.py
--- a/airflow/utils/configuration.py
+++ b/airflow/utils/configuration.py
@@ -23,13 +23,23 @@
from airflow.configuration import conf
-def tmp_configuration_copy(chmod=0o600):
+def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):
"""
Returns a path for a temporary file including a full copy of the configuration
settings.
+
+ :param include_env: Should the value of configuration from ``AIRFLOW__``
+ environment variables be included or not
+ :type include_env: bool
+ :param include_cmds: Should the result of calling any *_cmd config be
+ set (True, default), or should the _cmd options be left as the
+ command to run (False)
+ :type include_cmds: bool
:return: a path to a temporary file
"""
- cfg_dict = conf.as_dict(display_sensitive=True, raw=True)
+ cfg_dict = conf.as_dict(
+ display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env
+ )
temp_fd, cfg_path = mkstemp()
with os.fdopen(temp_fd, 'w') as temp_file:
|
{"golden_diff": "diff --git a/airflow/task/task_runner/base_task_runner.py b/airflow/task/task_runner/base_task_runner.py\n--- a/airflow/task/task_runner/base_task_runner.py\n+++ b/airflow/task/task_runner/base_task_runner.py\n@@ -66,7 +66,7 @@\n # want to have to specify them in the sudo call - they would show\n # up in `ps` that way! And run commands now, as the other user\n # might not be able to run the cmds to get credentials\n- cfg_path = tmp_configuration_copy(chmod=0o600)\n+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True)\n \n # Give ownership of file to user; only they can read and write\n subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)\n@@ -83,7 +83,7 @@\n # we are running as the same user, and can pass through environment\n # variables then we don't need to include those in the config copy\n # - the runner can read/execute those values as it needs\n- cfg_path = tmp_configuration_copy(chmod=0o600)\n+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=False, include_cmds=False)\n \n self._error_file = NamedTemporaryFile(delete=True)\n if self.run_as_user:\ndiff --git a/airflow/utils/configuration.py b/airflow/utils/configuration.py\n--- a/airflow/utils/configuration.py\n+++ b/airflow/utils/configuration.py\n@@ -23,13 +23,23 @@\n from airflow.configuration import conf\n \n \n-def tmp_configuration_copy(chmod=0o600):\n+def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):\n \"\"\"\n Returns a path for a temporary file including a full copy of the configuration\n settings.\n+\n+ :param include_env: Should the value of configuration from ``AIRFLOW__``\n+ environment variables be included or not\n+ :type include_env: bool\n+ :param include_cmds: Should the result of calling any *_cmd config be\n+ set (True, default), or should the _cmd options be left as the\n+ command to run (False)\n+ :type include_cmds: bool\n :return: a path to a temporary file\n \"\"\"\n- cfg_dict = conf.as_dict(display_sensitive=True, raw=True)\n+ cfg_dict = conf.as_dict(\n+ display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env\n+ )\n temp_fd, cfg_path = mkstemp()\n \n with os.fdopen(temp_fd, 'w') as temp_file:\n", "issue": "Remove duplicate line of code for configuration copy\nThis is code improvement PR to remove a duplicate line. The code copies config in either cases of IF statement hence duplicate call. \r\nMoving that before IF makes it unique and sufficient for the purpose.\r\n\r\nI have moved first call above IF and removed ELSE block\r\n\r\n---\r\n**^ Add meaningful description above**\r\n\r\nRead the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.\r\nIn case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.\r\nIn case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).\r\nIn case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Base task runner\"\"\"\nimport os\nimport subprocess\nimport threading\nfrom pwd import getpwnam\nfrom tempfile import NamedTemporaryFile\nfrom typing import Optional, Union\n\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException\nfrom airflow.models.taskinstance import load_error_file\nfrom airflow.utils.configuration import tmp_configuration_copy\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.platform import getuser\n\nPYTHONPATH_VAR = 'PYTHONPATH'\n\n\nclass BaseTaskRunner(LoggingMixin):\n \"\"\"\n Runs Airflow task instances by invoking the `airflow tasks run` command with raw\n mode enabled in a subprocess.\n\n :param local_task_job: The local task job associated with running the\n associated task instance.\n :type local_task_job: airflow.jobs.local_task_job.LocalTaskJob\n \"\"\"\n\n def __init__(self, local_task_job):\n # Pass task instance context into log handlers to setup the logger.\n super().__init__(local_task_job.task_instance)\n self._task_instance = local_task_job.task_instance\n\n popen_prepend = []\n if self._task_instance.run_as_user:\n self.run_as_user = self._task_instance.run_as_user\n else:\n try:\n self.run_as_user = conf.get('core', 'default_impersonation')\n except AirflowConfigException:\n self.run_as_user = None\n\n # Add sudo commands to change user if we need to. Needed to handle SubDagOperator\n # case using a SequentialExecutor.\n self.log.debug(\"Planning to run as the %s user\", self.run_as_user)\n if self.run_as_user and (self.run_as_user != getuser()):\n # We want to include any environment variables now, as we won't\n # want to have to specify them in the sudo call - they would show\n # up in `ps` that way! And run commands now, as the other user\n # might not be able to run the cmds to get credentials\n cfg_path = tmp_configuration_copy(chmod=0o600)\n\n # Give ownership of file to user; only they can read and write\n subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)\n\n # propagate PYTHONPATH environment variable\n pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')\n popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]\n\n if pythonpath_value:\n popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')\n\n else:\n # Always provide a copy of the configuration file settings. 
Since\n # we are running as the same user, and can pass through environment\n # variables then we don't need to include those in the config copy\n # - the runner can read/execute those values as it needs\n cfg_path = tmp_configuration_copy(chmod=0o600)\n\n self._error_file = NamedTemporaryFile(delete=True)\n if self.run_as_user:\n try:\n os.chown(self._error_file.name, getpwnam(self.run_as_user).pw_uid, -1)\n except KeyError:\n # No user `run_as_user` found\n pass\n\n self._cfg_path = cfg_path\n self._command = (\n popen_prepend\n + self._task_instance.command_as_list(\n raw=True,\n pickle_id=local_task_job.pickle_id,\n mark_success=local_task_job.mark_success,\n job_id=local_task_job.id,\n pool=local_task_job.pool,\n cfg_path=cfg_path,\n )\n + [\"--error-file\", self._error_file.name]\n )\n self.process = None\n\n def deserialize_run_error(self) -> Optional[Union[str, Exception]]:\n \"\"\"Return task runtime error if its written to provided error file.\"\"\"\n return load_error_file(self._error_file)\n\n def _read_task_logs(self, stream):\n while True:\n line = stream.readline()\n if isinstance(line, bytes):\n line = line.decode('utf-8')\n if not line:\n break\n self.log.info(\n 'Job %s: Subtask %s %s',\n self._task_instance.job_id,\n self._task_instance.task_id,\n line.rstrip('\\n'),\n )\n\n def run_command(self, run_with=None):\n \"\"\"\n Run the task command.\n\n :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``\n :type run_with: list\n :return: the process that was run\n :rtype: subprocess.Popen\n \"\"\"\n run_with = run_with or []\n full_cmd = run_with + self._command\n\n self.log.info(\"Running on host: %s\", get_hostname())\n self.log.info('Running: %s', full_cmd)\n\n proc = subprocess.Popen(\n full_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n close_fds=True,\n env=os.environ.copy(),\n preexec_fn=os.setsid,\n )\n\n # Start daemon thread to read subprocess logging output\n log_reader = threading.Thread(\n target=self._read_task_logs,\n args=(proc.stdout,),\n )\n log_reader.daemon = True\n log_reader.start()\n return proc\n\n def start(self):\n \"\"\"Start running the task instance in a subprocess.\"\"\"\n raise NotImplementedError()\n\n def return_code(self) -> Optional[int]:\n \"\"\"\n :return: The return code associated with running the task instance or\n None if the task is not yet done.\n :rtype: int\n \"\"\"\n raise NotImplementedError()\n\n def terminate(self) -> None:\n \"\"\"Force kill the running task instance.\"\"\"\n raise NotImplementedError()\n\n def on_finish(self) -> None:\n \"\"\"A callback that should be called when this is done running.\"\"\"\n if self._cfg_path and os.path.isfile(self._cfg_path):\n if self.run_as_user:\n subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)\n else:\n os.remove(self._cfg_path)\n try:\n self._error_file.close()\n except FileNotFoundError:\n # The subprocess has deleted this file before we do\n # so we ignore\n pass\n", "path": "airflow/task/task_runner/base_task_runner.py"}, {"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\nfrom tempfile import mkstemp\n\nfrom airflow.configuration import conf\n\n\ndef tmp_configuration_copy(chmod=0o600):\n \"\"\"\n Returns a path for a temporary file including a full copy of the configuration\n settings.\n :return: a path to a temporary file\n \"\"\"\n cfg_dict = conf.as_dict(display_sensitive=True, raw=True)\n temp_fd, cfg_path = mkstemp()\n\n with os.fdopen(temp_fd, 'w') as temp_file:\n # Set the permissions before we write anything to it.\n if chmod is not None:\n os.fchmod(temp_fd, chmod)\n json.dump(cfg_dict, temp_file)\n\n return cfg_path\n", "path": "airflow/utils/configuration.py"}]}
| 3,181 | 609 |
gh_patches_debug_61186
|
rasdani/github-patches
|
git_diff
|
benoitc__gunicorn-1071
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include request URL in error message
It would be really helpful if the logged error message were "Error handling request http://host/path/etc" instead of just "Error handling request".
</issue>
<code>
[start of gunicorn/workers/base.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import os
8 import signal
9 import sys
10 import time
11 import traceback
12 from random import randint
13
14
15 from gunicorn import util
16 from gunicorn.workers.workertmp import WorkerTmp
17 from gunicorn.reloader import Reloader
18 from gunicorn.http.errors import (
19 InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,
20 InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,
21 )
22 from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
23 from gunicorn.http.wsgi import default_environ, Response
24 from gunicorn.six import MAXSIZE
25
26
27 class Worker(object):
28
29 SIGNALS = [getattr(signal, "SIG%s" % x)
30 for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]
31
32 PIPE = []
33
34 def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
35 """\
36 This is called pre-fork so it shouldn't do anything to the
37 current process. If there's a need to make process wide
38 changes you'll want to do that in ``self.init_process()``.
39 """
40 self.age = age
41 self.ppid = ppid
42 self.sockets = sockets
43 self.app = app
44 self.timeout = timeout
45 self.cfg = cfg
46 self.booted = False
47 self.aborted = False
48 self.reloader = None
49
50 self.nr = 0
51 jitter = randint(0, cfg.max_requests_jitter)
52 self.max_requests = cfg.max_requests + jitter or MAXSIZE
53 self.alive = True
54 self.log = log
55 self.tmp = WorkerTmp(cfg)
56
57 def __str__(self):
58 return "<Worker %s>" % self.pid
59
60 @property
61 def pid(self):
62 return os.getpid()
63
64 def notify(self):
65 """\
66 Your worker subclass must arrange to have this method called
67 once every ``self.timeout`` seconds. If you fail in accomplishing
68 this task, the master process will murder your workers.
69 """
70 self.tmp.notify()
71
72 def run(self):
73 """\
74 This is the mainloop of a worker process. You should override
75 this method in a subclass to provide the intended behaviour
76 for your particular evil schemes.
77 """
78 raise NotImplementedError()
79
80 def init_process(self):
81 """\
82 If you override this method in a subclass, the last statement
83 in the function should be to call this method with
84 super(MyWorkerClass, self).init_process() so that the ``run()``
85 loop is initiated.
86 """
87
88 # start the reloader
89 if self.cfg.reload:
90 def changed(fname):
91 self.log.info("Worker reloading: %s modified", fname)
92 os.kill(self.pid, signal.SIGQUIT)
93 self.reloader = Reloader(callback=changed)
94 self.reloader.start()
95
96 # set environment' variables
97 if self.cfg.env:
98 for k, v in self.cfg.env.items():
99 os.environ[k] = v
100
101 util.set_owner_process(self.cfg.uid, self.cfg.gid)
102
103 # Reseed the random number generator
104 util.seed()
105
106 # For waking ourselves up
107 self.PIPE = os.pipe()
108 for p in self.PIPE:
109 util.set_non_blocking(p)
110 util.close_on_exec(p)
111
112 # Prevent fd inheritance
113 [util.close_on_exec(s) for s in self.sockets]
114 util.close_on_exec(self.tmp.fileno())
115
116 self.log.close_on_exec()
117
118 self.init_signals()
119
120 self.cfg.post_worker_init(self)
121
122 self.load_wsgi()
123
124 # Enter main run loop
125 self.booted = True
126 self.run()
127
128 def load_wsgi(self):
129 try:
130 self.wsgi = self.app.wsgi()
131 except SyntaxError as e:
132 if not self.cfg.reload:
133 raise
134
135 self.log.exception(e)
136
137 exc_type, exc_val, exc_tb = sys.exc_info()
138 self.reloader.add_extra_file(exc_val.filename)
139
140 tb_string = traceback.format_exc(exc_tb)
141 self.wsgi = util.make_fail_app(tb_string)
142
143 def init_signals(self):
144 # reset signaling
145 [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]
146 # init new signaling
147 signal.signal(signal.SIGQUIT, self.handle_quit)
148 signal.signal(signal.SIGTERM, self.handle_exit)
149 signal.signal(signal.SIGINT, self.handle_quit)
150 signal.signal(signal.SIGWINCH, self.handle_winch)
151 signal.signal(signal.SIGUSR1, self.handle_usr1)
152 signal.signal(signal.SIGABRT, self.handle_abort)
153
154 # Don't let SIGTERM and SIGUSR1 disturb active requests
155 # by interrupting system calls
156 if hasattr(signal, 'siginterrupt'): # python >= 2.6
157 signal.siginterrupt(signal.SIGTERM, False)
158 signal.siginterrupt(signal.SIGUSR1, False)
159
160 def handle_usr1(self, sig, frame):
161 self.log.reopen_files()
162
163 def handle_exit(self, sig, frame):
164 self.alive = False
165
166 def handle_quit(self, sig, frame):
167 self.alive = False
168 # worker_int callback
169 self.cfg.worker_int(self)
170 time.sleep(0.1)
171 sys.exit(0)
172
173 def handle_abort(self, sig, frame):
174 self.alive = False
175 self.cfg.worker_abort(self)
176 sys.exit(1)
177
178 def handle_error(self, req, client, addr, exc):
179 request_start = datetime.now()
180 addr = addr or ('', -1) # unix socket case
181 if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,
182 InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
183 LimitRequestLine, LimitRequestHeaders,
184 InvalidProxyLine, ForbiddenProxyRequest)):
185
186 status_int = 400
187 reason = "Bad Request"
188
189 if isinstance(exc, InvalidRequestLine):
190 mesg = "Invalid Request Line '%s'" % str(exc)
191 elif isinstance(exc, InvalidRequestMethod):
192 mesg = "Invalid Method '%s'" % str(exc)
193 elif isinstance(exc, InvalidHTTPVersion):
194 mesg = "Invalid HTTP Version '%s'" % str(exc)
195 elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
196 mesg = "%s" % str(exc)
197 if not req and hasattr(exc, "req"):
198 req = exc.req # for access log
199 elif isinstance(exc, LimitRequestLine):
200 mesg = "%s" % str(exc)
201 elif isinstance(exc, LimitRequestHeaders):
202 mesg = "Error parsing headers: '%s'" % str(exc)
203 elif isinstance(exc, InvalidProxyLine):
204 mesg = "'%s'" % str(exc)
205 elif isinstance(exc, ForbiddenProxyRequest):
206 reason = "Forbidden"
207 mesg = "Request forbidden"
208 status_int = 403
209
210 msg = "Invalid request from ip={ip}: {error}"
211 self.log.debug(msg.format(ip=addr[0], error=str(exc)))
212 else:
213 self.log.exception("Error handling request")
214
215 status_int = 500
216 reason = "Internal Server Error"
217 mesg = ""
218
219 if req is not None:
220 request_time = datetime.now() - request_start
221 environ = default_environ(req, client, self.cfg)
222 environ['REMOTE_ADDR'] = addr[0]
223 environ['REMOTE_PORT'] = str(addr[1])
224 resp = Response(req, client, self.cfg)
225 resp.status = "%s %s" % (status_int, reason)
226 resp.response_length = len(mesg)
227 self.log.access(resp, req, environ, request_time)
228
229 try:
230 util.write_error(client, status_int, reason, mesg)
231 except:
232 self.log.debug("Failed to send error message.")
233
234 def handle_winch(self, sig, fname):
235 # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
236 return
237
[end of gunicorn/workers/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -210,7 +210,7 @@
msg = "Invalid request from ip={ip}: {error}"
self.log.debug(msg.format(ip=addr[0], error=str(exc)))
else:
- self.log.exception("Error handling request")
+ self.log.exception("Error handling request %s", req.uri)
status_int = 500
reason = "Internal Server Error"
|
{"golden_diff": "diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py\n--- a/gunicorn/workers/base.py\n+++ b/gunicorn/workers/base.py\n@@ -210,7 +210,7 @@\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n- self.log.exception(\"Error handling request\")\n+ self.log.exception(\"Error handling request %s\", req.uri)\n \n status_int = 500\n reason = \"Internal Server Error\"\n", "issue": "Include request URL in error message\nIt would be really helpful if the logged error message were \"Error handling request http://host/path/etc\" instead of just \"Error handling request\".\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif 
isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request\")\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}]}
| 2,944 | 126 |
gh_patches_debug_35072
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-1974
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
give better error when DATABASE_URL is empty
@jamieforrest hit this and I just did as well. Here's the gnarly Traceback we currently have:
```
pid-39707 thread-140735224484224 (MainThread) Oh no! Aspen crashed!
pid-39707 thread-140735224484224 (MainThread) Traceback (most recent call last):
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py", line 125, in main
pid-39707 thread-140735224484224 (MainThread) _main(argv)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py", line 51, in _main
pid-39707 thread-140735224484224 (MainThread) website = Website(argv)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/website.py", line 31, in __init__
pid-39707 thread-140735224484224 (MainThread) self.configure(argv)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py", line 343, in configure
pid-39707 thread-140735224484224 (MainThread) self.run_config_scripts()
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py", line 386, in run_config_scripts
pid-39707 thread-140735224484224 (MainThread) execfile(filepath, {'website': self})
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/configure-aspen.py", line 7, in <module>
pid-39707 thread-140735224484224 (MainThread) import gittip.authentication
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/authentication.py", line 10, in <module>
pid-39707 thread-140735224484224 (MainThread) from gittip.orm import db
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 73, in <module>
pid-39707 thread-140735224484224 (MainThread) db = SQLAlchemy()
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 31, in __init__
pid-39707 thread-140735224484224 (MainThread) self.session = self.create_session()
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 46, in create_session
pid-39707 thread-140735224484224 (MainThread) session.configure(bind=self.engine)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 38, in engine
pid-39707 thread-140735224484224 (MainThread) return create_engine(dburl, pool_size=maxconn, max_overflow=0)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py", line 331, in create_engine
pid-39707 thread-140735224484224 (MainThread) return strategy.create(*args, **kwargs)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/strategies.py", line 48, in create
pid-39707 thread-140735224484224 (MainThread) u = url.make_url(name_or_url)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py", line 154, in make_url
pid-39707 thread-140735224484224 (MainThread) return _parse_rfc1738_args(name_or_url)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py", line 196, in _parse_rfc1738_args
pid-39707 thread-140735224484224 (MainThread) "Could not parse rfc1738 URL from string '%s'" % name)
pid-39707 thread-140735224484224 (MainThread) ArgumentError: Could not parse rfc1738 URL from string ''
```
</issue>
<code>
[start of gittip/utils/swaddle.py]
1 """Run a command with an environment specified in a file.
2 """
3 import os
4 import sys
5 if sys.platform.startswith('win'):
6 import ctypes
7
8
9 def configure(envdef):
10 """Given a filepath or -, return a tuple of bytestrings.
11 """
12 if envdef == '-': # Heroku-style
13
14 # Heroku used to use =>, which was weird. Now they use :, and we may as
15 # well too, but I had already started using =, and now changing would
16 # break people's local.env.
17
18 m = "[SWADDLE] reading environment from stdin."
19 print >> sys.stderr, m
20 fp = sys.stdin
21 splitter = ": "
22 elif not os.path.isfile(envdef):
23 m = "[SWADDLE] %s is not a file; environment unchanged." % envdef
24 print >> sys.stderr, m
25 envdef = ""
26 else: # Gittip-style
27 fp = open(envdef)
28 splitter = "="
29
30 args = sys.argv[2:]
31 if not args:
32 m ="[SWADDLE] No command specified; exiting."
33 raise SystemExit(m)
34 if sys.platform == 'win32' and not os.path.isfile(args[0]):
35 # Try with an '.exe' extension on Windows if the command doesn't
36 # already have an extension.
37 if os.path.splitext(args[0])[-1] == '':
38 args[0] += '.exe'
39 if not os.path.isfile(args[0]):
40 m ="[SWADDLE] Command %s does not exist; exiting." % args[0]
41 raise SystemExit(m)
42
43 if envdef:
44 for line in fp:
45 line = line.split('#')[0].strip()
46 if splitter not in line:
47 m = "[SWADDLE] Skipping line: %s." % line
48 print >> sys.stderr, m
49 continue
50 key, val = line.split(splitter, 1)
51 if sys.platform.startswith('win'):
52 ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())
53 else:
54 os.environ[key.strip()] = val.strip()
55
56 return args
57
58
59 def main():
60 if len(sys.argv) < 1:
61 m ="[SWADDLE] Usage: %s {definition.env} {command} {args}" % sys.argv[0]
62 raise SystemExit(m)
63 envdef = sys.argv[1]
64 args = configure(envdef)
65 os.execv(args[0], args)
66
[end of gittip/utils/swaddle.py]
[start of setup.py]
1 import os
2 from setuptools import setup, find_packages
3
4
5 def get_version():
6 try:
7 return open(os.path.join('www', 'version.txt')).read().strip()
8 except OSError:
9 return 'n/a'
10
11
12 setup( name='gittip'
13 , version=get_version()
14 , packages=find_packages()
15 , entry_points = { 'console_scripts'
16 : [ 'payday=gittip.cli:payday'
17 , 'swaddle=gittip.utils.swaddle:main'
18 , 'fake_data=gittip.utils.fake_data:main'
19 ]
20 }
21 )
22
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gittip/utils/swaddle.py b/gittip/utils/swaddle.py
deleted file mode 100644
--- a/gittip/utils/swaddle.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""Run a command with an environment specified in a file.
-"""
-import os
-import sys
-if sys.platform.startswith('win'):
- import ctypes
-
-
-def configure(envdef):
- """Given a filepath or -, return a tuple of bytestrings.
- """
- if envdef == '-': # Heroku-style
-
- # Heroku used to use =>, which was weird. Now they use :, and we may as
- # well too, but I had already started using =, and now changing would
- # break people's local.env.
-
- m = "[SWADDLE] reading environment from stdin."
- print >> sys.stderr, m
- fp = sys.stdin
- splitter = ": "
- elif not os.path.isfile(envdef):
- m = "[SWADDLE] %s is not a file; environment unchanged." % envdef
- print >> sys.stderr, m
- envdef = ""
- else: # Gittip-style
- fp = open(envdef)
- splitter = "="
-
- args = sys.argv[2:]
- if not args:
- m ="[SWADDLE] No command specified; exiting."
- raise SystemExit(m)
- if sys.platform == 'win32' and not os.path.isfile(args[0]):
- # Try with an '.exe' extension on Windows if the command doesn't
- # already have an extension.
- if os.path.splitext(args[0])[-1] == '':
- args[0] += '.exe'
- if not os.path.isfile(args[0]):
- m ="[SWADDLE] Command %s does not exist; exiting." % args[0]
- raise SystemExit(m)
-
- if envdef:
- for line in fp:
- line = line.split('#')[0].strip()
- if splitter not in line:
- m = "[SWADDLE] Skipping line: %s." % line
- print >> sys.stderr, m
- continue
- key, val = line.split(splitter, 1)
- if sys.platform.startswith('win'):
- ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())
- else:
- os.environ[key.strip()] = val.strip()
-
- return args
-
-
-def main():
- if len(sys.argv) < 1:
- m ="[SWADDLE] Usage: %s {definition.env} {command} {args}" % sys.argv[0]
- raise SystemExit(m)
- envdef = sys.argv[1]
- args = configure(envdef)
- os.execv(args[0], args)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,6 @@
, packages=find_packages()
, entry_points = { 'console_scripts'
: [ 'payday=gittip.cli:payday'
- , 'swaddle=gittip.utils.swaddle:main'
, 'fake_data=gittip.utils.fake_data:main'
]
}
|
{"golden_diff": "diff --git a/gittip/utils/swaddle.py b/gittip/utils/swaddle.py\ndeleted file mode 100644\n--- a/gittip/utils/swaddle.py\n+++ /dev/null\n@@ -1,65 +0,0 @@\n-\"\"\"Run a command with an environment specified in a file.\n-\"\"\"\n-import os\n-import sys\n-if sys.platform.startswith('win'):\n- import ctypes\n-\n-\n-def configure(envdef):\n- \"\"\"Given a filepath or -, return a tuple of bytestrings.\n- \"\"\"\n- if envdef == '-': # Heroku-style\n-\n- # Heroku used to use =>, which was weird. Now they use :, and we may as\n- # well too, but I had already started using =, and now changing would\n- # break people's local.env.\n-\n- m = \"[SWADDLE] reading environment from stdin.\"\n- print >> sys.stderr, m\n- fp = sys.stdin\n- splitter = \": \"\n- elif not os.path.isfile(envdef):\n- m = \"[SWADDLE] %s is not a file; environment unchanged.\" % envdef\n- print >> sys.stderr, m\n- envdef = \"\"\n- else: # Gittip-style\n- fp = open(envdef)\n- splitter = \"=\"\n-\n- args = sys.argv[2:]\n- if not args:\n- m =\"[SWADDLE] No command specified; exiting.\"\n- raise SystemExit(m)\n- if sys.platform == 'win32' and not os.path.isfile(args[0]):\n- # Try with an '.exe' extension on Windows if the command doesn't\n- # already have an extension.\n- if os.path.splitext(args[0])[-1] == '':\n- args[0] += '.exe'\n- if not os.path.isfile(args[0]):\n- m =\"[SWADDLE] Command %s does not exist; exiting.\" % args[0]\n- raise SystemExit(m)\n-\n- if envdef:\n- for line in fp:\n- line = line.split('#')[0].strip()\n- if splitter not in line:\n- m = \"[SWADDLE] Skipping line: %s.\" % line\n- print >> sys.stderr, m\n- continue\n- key, val = line.split(splitter, 1)\n- if sys.platform.startswith('win'):\n- ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())\n- else:\n- os.environ[key.strip()] = val.strip()\n-\n- return args\n-\n-\n-def main():\n- if len(sys.argv) < 1:\n- m =\"[SWADDLE] Usage: %s {definition.env} {command} {args}\" % sys.argv[0]\n- raise SystemExit(m)\n- envdef = sys.argv[1]\n- args = configure(envdef)\n- os.execv(args[0], args)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,6 @@\n , packages=find_packages()\n , entry_points = { 'console_scripts'\n : [ 'payday=gittip.cli:payday'\n- , 'swaddle=gittip.utils.swaddle:main'\n , 'fake_data=gittip.utils.fake_data:main'\n ]\n }\n", "issue": "give better error when DATABASE_URL is empty\n@jamieforrest hit this and I just did as well. Here's the gnarly Traceback we currently have:\n\n```\npid-39707 thread-140735224484224 (MainThread) Oh no! 
Aspen crashed!\npid-39707 thread-140735224484224 (MainThread) Traceback (most recent call last):\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py\", line 125, in main\npid-39707 thread-140735224484224 (MainThread) _main(argv)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py\", line 51, in _main\npid-39707 thread-140735224484224 (MainThread) website = Website(argv)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/website.py\", line 31, in __init__\npid-39707 thread-140735224484224 (MainThread) self.configure(argv)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py\", line 343, in configure\npid-39707 thread-140735224484224 (MainThread) self.run_config_scripts()\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py\", line 386, in run_config_scripts\npid-39707 thread-140735224484224 (MainThread) execfile(filepath, {'website': self})\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/configure-aspen.py\", line 7, in <module>\npid-39707 thread-140735224484224 (MainThread) import gittip.authentication\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/authentication.py\", line 10, in <module>\npid-39707 thread-140735224484224 (MainThread) from gittip.orm import db\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 73, in <module>\npid-39707 thread-140735224484224 (MainThread) db = SQLAlchemy()\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 31, in __init__\npid-39707 thread-140735224484224 (MainThread) self.session = self.create_session()\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 46, in create_session\npid-39707 thread-140735224484224 (MainThread) session.configure(bind=self.engine)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 38, in engine\npid-39707 thread-140735224484224 (MainThread) return create_engine(dburl, pool_size=maxconn, max_overflow=0)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py\", line 331, in create_engine\npid-39707 thread-140735224484224 (MainThread) return strategy.create(*args, **kwargs)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/strategies.py\", line 48, in create\npid-39707 thread-140735224484224 (MainThread) u = url.make_url(name_or_url)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py\", line 154, in make_url\npid-39707 thread-140735224484224 (MainThread) return _parse_rfc1738_args(name_or_url)\npid-39707 
thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py\", line 196, in _parse_rfc1738_args\npid-39707 thread-140735224484224 (MainThread) \"Could not parse rfc1738 URL from string '%s'\" % name)\npid-39707 thread-140735224484224 (MainThread) ArgumentError: Could not parse rfc1738 URL from string ''\n```\n\n", "before_files": [{"content": "\"\"\"Run a command with an environment specified in a file.\n\"\"\"\nimport os\nimport sys\nif sys.platform.startswith('win'):\n import ctypes\n\n\ndef configure(envdef):\n \"\"\"Given a filepath or -, return a tuple of bytestrings.\n \"\"\"\n if envdef == '-': # Heroku-style\n\n # Heroku used to use =>, which was weird. Now they use :, and we may as\n # well too, but I had already started using =, and now changing would\n # break people's local.env.\n\n m = \"[SWADDLE] reading environment from stdin.\"\n print >> sys.stderr, m\n fp = sys.stdin\n splitter = \": \"\n elif not os.path.isfile(envdef):\n m = \"[SWADDLE] %s is not a file; environment unchanged.\" % envdef\n print >> sys.stderr, m\n envdef = \"\"\n else: # Gittip-style\n fp = open(envdef)\n splitter = \"=\"\n\n args = sys.argv[2:]\n if not args:\n m =\"[SWADDLE] No command specified; exiting.\"\n raise SystemExit(m)\n if sys.platform == 'win32' and not os.path.isfile(args[0]):\n # Try with an '.exe' extension on Windows if the command doesn't\n # already have an extension.\n if os.path.splitext(args[0])[-1] == '':\n args[0] += '.exe'\n if not os.path.isfile(args[0]):\n m =\"[SWADDLE] Command %s does not exist; exiting.\" % args[0]\n raise SystemExit(m)\n\n if envdef:\n for line in fp:\n line = line.split('#')[0].strip()\n if splitter not in line:\n m = \"[SWADDLE] Skipping line: %s.\" % line\n print >> sys.stderr, m\n continue\n key, val = line.split(splitter, 1)\n if sys.platform.startswith('win'):\n ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())\n else:\n os.environ[key.strip()] = val.strip()\n\n return args\n\n\ndef main():\n if len(sys.argv) < 1:\n m =\"[SWADDLE] Usage: %s {definition.env} {command} {args}\" % sys.argv[0]\n raise SystemExit(m)\n envdef = sys.argv[1]\n args = configure(envdef)\n os.execv(args[0], args)\n", "path": "gittip/utils/swaddle.py"}, {"content": "import os\nfrom setuptools import setup, find_packages\n\n\ndef get_version():\n try:\n return open(os.path.join('www', 'version.txt')).read().strip()\n except OSError:\n return 'n/a'\n\n\nsetup( name='gittip'\n , version=get_version()\n , packages=find_packages()\n , entry_points = { 'console_scripts'\n : [ 'payday=gittip.cli:payday'\n , 'swaddle=gittip.utils.swaddle:main'\n , 'fake_data=gittip.utils.fake_data:main'\n ]\n }\n )\n", "path": "setup.py"}]}
| 3,105 | 746 |
gh_patches_debug_15818
|
rasdani/github-patches
|
git_diff
|
intel__dffml-177
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
model: scikit: setup.py needs correct entrypoints
```diff
diff --git a/model/scikit/setup.py b/model/scikit/setup.py
index 1bd6284..efbff80 100644
--- a/model/scikit/setup.py
+++ b/model/scikit/setup.py
@@ -57,5 +57,9 @@ setup(
],
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
- entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
+ entry_points={"dffml.model": [
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ ]},
)
```
</issue>
<code>
[start of model/scikit/setup.py]
1 import os
2 import ast
3 from io import open
4 from setuptools import find_packages, setup
5
6 ORG = "intel"
7 NAME = "dffml-model-scikit"
8 DESCRIPTION = "DFFML model scikit"
9 AUTHOR_NAME = "Yash Lamba"
10 AUTHOR_EMAIL = "[email protected]"
11 INSTALL_REQUIRES = ["scikit-learn>=0.21.2", "joblib>=0.13.2", "pandas>=0.25.0"]
12
13 IMPORT_NAME = (
14 NAME
15 if "replace_package_name".upper() != NAME
16 else "replace_import_package_name".upper()
17 ).replace("-", "_")
18
19 SELF_PATH = os.path.dirname(os.path.realpath(__file__))
20
21 with open(os.path.join(SELF_PATH, IMPORT_NAME, "version.py"), "r") as f:
22 for line in f:
23 if line.startswith("VERSION"):
24 version = ast.literal_eval(line.strip().split("=")[-1].strip())
25 break
26
27 with open(os.path.join(SELF_PATH, "README.md"), "r", encoding="utf-8") as f:
28 readme = f.read()
29
30 setup(
31 name="dffml-model-scikit",
32 version=version,
33 description="",
34 long_description=readme,
35 long_description_content_type="text/markdown",
36 author="Yash Lamba",
37 author_email="[email protected]",
38 maintainer="John Andersen",
39 maintainer_email="[email protected]",
40 url="https://github.com/intel/dffml/blob/master/model/scikit/README.md",
41 license="MIT",
42 keywords=["dffml"],
43 classifiers=[
44 "Development Status :: 3 - Alpha",
45 "Intended Audience :: Developers",
46 "License :: OSI Approved :: MIT License",
47 "Natural Language :: English",
48 "Operating System :: OS Independent",
49 "Programming Language :: Python :: 3 :: Only",
50 "Programming Language :: Python :: 3.7",
51 "Programming Language :: Python :: Implementation :: CPython",
52 "Programming Language :: Python :: Implementation :: PyPy",
53 ],
54 install_requires=INSTALL_REQUIRES,
55 packages=find_packages(),
56 entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
57 )
58
[end of model/scikit/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/model/scikit/setup.py b/model/scikit/setup.py
--- a/model/scikit/setup.py
+++ b/model/scikit/setup.py
@@ -53,5 +53,18 @@
],
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
- entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
+ entry_points={
+ "dffml.model": [
+ f"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier",
+ f"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier",
+ f"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier",
+ f"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier",
+ f"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier",
+ f"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB",
+ f"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis",
+ f"scikitsvc = {IMPORT_NAME}.scikit_models:SVC",
+ f"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression",
+ ]
+ },
)
|
{"golden_diff": "diff --git a/model/scikit/setup.py b/model/scikit/setup.py\n--- a/model/scikit/setup.py\n+++ b/model/scikit/setup.py\n@@ -53,5 +53,18 @@\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n- entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\n+ entry_points={\n+ \"dffml.model\": [\n+ f\"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier\",\n+ f\"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier\",\n+ f\"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier\",\n+ f\"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier\",\n+ f\"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier\",\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier\",\n+ f\"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB\",\n+ f\"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis\",\n+ f\"scikitsvc = {IMPORT_NAME}.scikit_models:SVC\",\n+ f\"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression\",\n+ ]\n+ },\n )\n", "issue": "model: scikit: setup.py needs correct entrypoints\n```diff\r\ndiff --git a/model/scikit/setup.py b/model/scikit/setup.py\r\nindex 1bd6284..efbff80 100644\r\n--- a/model/scikit/setup.py\r\n+++ b/model/scikit/setup.py\r\n@@ -57,5 +57,9 @@ setup(\r\n ],\r\n install_requires=INSTALL_REQUIRES,\r\n packages=find_packages(),\r\n- entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\r\n+ entry_points={\"dffml.model\": [\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ ]},\r\n )\r\n```\n", "before_files": [{"content": "import os\nimport ast\nfrom io import open\nfrom setuptools import find_packages, setup\n\nORG = \"intel\"\nNAME = \"dffml-model-scikit\"\nDESCRIPTION = \"DFFML model scikit\"\nAUTHOR_NAME = \"Yash Lamba\"\nAUTHOR_EMAIL = \"[email protected]\"\nINSTALL_REQUIRES = [\"scikit-learn>=0.21.2\", \"joblib>=0.13.2\", \"pandas>=0.25.0\"]\n\nIMPORT_NAME = (\n NAME\n if \"replace_package_name\".upper() != NAME\n else \"replace_import_package_name\".upper()\n).replace(\"-\", \"_\")\n\nSELF_PATH = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(SELF_PATH, IMPORT_NAME, \"version.py\"), \"r\") as f:\n for line in f:\n if line.startswith(\"VERSION\"):\n version = ast.literal_eval(line.strip().split(\"=\")[-1].strip())\n break\n\nwith open(os.path.join(SELF_PATH, \"README.md\"), \"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"dffml-model-scikit\",\n version=version,\n description=\"\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Yash Lamba\",\n author_email=\"[email protected]\",\n maintainer=\"John Andersen\",\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/intel/dffml/blob/master/model/scikit/README.md\",\n license=\"MIT\",\n keywords=[\"dffml\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n 
entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\n)\n", "path": "model/scikit/setup.py"}]}
| 1,338 | 317 |
gh_patches_debug_18895
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1186
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyhf json2xml requires pyhf[contrib]
# Description
```
$ pip install pyhf[xmlio]
$ pyhf json2xml -h
ERROR:pyhf.contrib.utils:No module named 'requests'
Installation of the contrib extra is required to use pyhf.contrib.utils.download
Please install with: python -m pip install pyhf[contrib]
Usage: pyhf json2xml [OPTIONS] [WORKSPACE]
Convert pyhf JSON back to XML + ROOT files.
Options:
--output-dir PATH
--specroot TEXT
--dataroot TEXT
--resultprefix TEXT
-p, --patch TEXT
-h, --help Show this message and exit.
```
# Expected Behavior
Shown the help without any reference to pyhf.contrib as this does not depend on contrib.
</issue>
<code>
[start of src/pyhf/contrib/cli.py]
1 """CLI for functionality that will get migrated out eventually."""
2 import logging
3 import click
4 from pathlib import Path
5
6 from . import utils
7
8 logging.basicConfig()
9 log = logging.getLogger(__name__)
10
11
12 @click.group(name="contrib")
13 def cli():
14 """
15 Contrib experimental operations.
16
17 .. note::
18
19 Requires installation of the ``contrib`` extra.
20
21 .. code-block:: shell
22
23 $ python -m pip install pyhf[contrib]
24 """
25
26
27 @cli.command()
28 @click.argument("archive-url")
29 @click.argument("output-directory")
30 @click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
31 @click.option(
32 "-f", "--force", is_flag=True, help="Force download from non-approved host"
33 )
34 @click.option(
35 "-c",
36 "--compress",
37 is_flag=True,
38 help="Keep the archive in a compressed tar.gz form",
39 )
40 def download(archive_url, output_directory, verbose, force, compress):
41 """
42 Download the patchset archive from the remote URL and extract it in a
43 directory at the path given.
44
45 Example:
46
47 .. code-block:: shell
48
49 $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods
50
51 \b
52 1Lbb-likelihoods/patchset.json
53 1Lbb-likelihoods/README.md
54 1Lbb-likelihoods/BkgOnly.json
55
56 Raises:
57 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
58 """
59 try:
60 utils.download(archive_url, output_directory, force, compress)
61
62 if verbose:
63 file_list = [str(file) for file in list(Path(output_directory).glob("*"))]
64 print("\n".join(file_list))
65 except AttributeError as excep:
66 exception_info = (
67 str(excep)
68 + "\nInstallation of the contrib extra is required to use the contrib CLI API"
69 + "\nPlease install with: python -m pip install pyhf[contrib]\n"
70 )
71 log.error(exception_info)
72
[end of src/pyhf/contrib/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pyhf/contrib/cli.py b/src/pyhf/contrib/cli.py
--- a/src/pyhf/contrib/cli.py
+++ b/src/pyhf/contrib/cli.py
@@ -3,8 +3,6 @@
import click
from pathlib import Path
-from . import utils
-
logging.basicConfig()
log = logging.getLogger(__name__)
@@ -22,6 +20,10 @@
$ python -m pip install pyhf[contrib]
"""
+ from . import utils # Guard CLI from missing extra
+
+ # TODO: https://github.com/scikit-hep/pyhf/issues/863
+ _ = utils # Placate pyflakes
@cli.command()
@@ -57,6 +59,8 @@
:class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
"""
try:
+ from . import utils
+
utils.download(archive_url, output_directory, force, compress)
if verbose:
|
{"golden_diff": "diff --git a/src/pyhf/contrib/cli.py b/src/pyhf/contrib/cli.py\n--- a/src/pyhf/contrib/cli.py\n+++ b/src/pyhf/contrib/cli.py\n@@ -3,8 +3,6 @@\n import click\n from pathlib import Path\n \n-from . import utils\n-\n logging.basicConfig()\n log = logging.getLogger(__name__)\n \n@@ -22,6 +20,10 @@\n \n $ python -m pip install pyhf[contrib]\n \"\"\"\n+ from . import utils # Guard CLI from missing extra\n+\n+ # TODO: https://github.com/scikit-hep/pyhf/issues/863\n+ _ = utils # Placate pyflakes\n \n \n @cli.command()\n@@ -57,6 +59,8 @@\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n+ from . import utils\n+\n utils.download(archive_url, output_directory, force, compress)\n \n if verbose:\n", "issue": "pyhf json2xml requires pyhf[contrib]\n# Description\r\n\r\n```\r\n$ pip install pyhf[xmlio]\r\n$ pyhf json2xml -h\r\nERROR:pyhf.contrib.utils:No module named 'requests'\r\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\r\nPlease install with: python -m pip install pyhf[contrib]\r\n\r\nUsage: pyhf json2xml [OPTIONS] [WORKSPACE]\r\n\r\n Convert pyhf JSON back to XML + ROOT files.\r\n\r\nOptions:\r\n --output-dir PATH\r\n --specroot TEXT\r\n --dataroot TEXT\r\n --resultprefix TEXT\r\n -p, --patch TEXT\r\n -h, --help Show this message and exit.\r\n```\r\n\r\n# Expected Behavior\r\n\r\nShown the help without any reference to pyhf.contrib as this does not depend on contrib.\n", "before_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nfrom . import utils\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n\n\[email protected]()\[email protected](\"archive-url\")\[email protected](\"output-directory\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. code-block:: shell\n\n $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/contrib/cli.py"}]}
| 1,322 | 226 |
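The fix recorded above defers `from . import utils` into the individual command bodies so that the CLI group (and its `--help`) keeps working when the optional `contrib` extra is not installed. A minimal sketch of that deferred-import pattern, meant to live inside a package's CLI submodule; `mypkg` and its `utils` helper are hypothetical names, not pyhf's real module tree:

```python
import logging

import click

logging.basicConfig()
log = logging.getLogger(__name__)


@click.group(name="contrib")
def cli():
    """Commands that need the optional 'contrib' extra."""


@cli.command()
@click.argument("archive-url")
@click.argument("output-directory")
def download(archive_url, output_directory):
    """Download an archive; only this command needs the optional dependencies."""
    try:
        # Imported here rather than at module level, so `mypkg contrib --help`
        # still works when the optional extra is not installed.
        from . import utils
    except ImportError:
        log.error(
            "Installation of the contrib extra is required: "
            "python -m pip install 'mypkg[contrib]'"
        )
        raise SystemExit(1)

    utils.download(archive_url, output_directory)
```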
gh_patches_debug_25711
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-8506
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Added `%matplotlib --list` so the available backends are easily visible.
Linked to #8444; this adds a `--list` argument to show the available backends.
</issue>
<code>
[start of IPython/core/magics/pylab.py]
1 """Implementation of magic functions for matplotlib/pylab support.
2 """
3 from __future__ import print_function
4 #-----------------------------------------------------------------------------
5 # Copyright (c) 2012 The IPython Development Team.
6 #
7 # Distributed under the terms of the Modified BSD License.
8 #
9 # The full license is in the file COPYING.txt, distributed with this software.
10 #-----------------------------------------------------------------------------
11
12 #-----------------------------------------------------------------------------
13 # Imports
14 #-----------------------------------------------------------------------------
15
16 # Our own packages
17 from traitlets.config.application import Application
18 from IPython.core import magic_arguments
19 from IPython.core.magic import Magics, magics_class, line_magic
20 from IPython.testing.skipdoctest import skip_doctest
21 from IPython.utils.warn import warn
22 from IPython.core.pylabtools import backends
23
24 #-----------------------------------------------------------------------------
25 # Magic implementation classes
26 #-----------------------------------------------------------------------------
27
28 magic_gui_arg = magic_arguments.argument(
29 'gui', nargs='?',
30 help="""Name of the matplotlib backend to use %s.
31 If given, the corresponding matplotlib backend is used,
32 otherwise it will be matplotlib's default
33 (which you can set in your matplotlib config file).
34 """ % str(tuple(sorted(backends.keys())))
35 )
36
37
38 @magics_class
39 class PylabMagics(Magics):
40 """Magics related to matplotlib's pylab support"""
41
42 @skip_doctest
43 @line_magic
44 @magic_arguments.magic_arguments()
45 @magic_gui_arg
46 def matplotlib(self, line=''):
47 """Set up matplotlib to work interactively.
48
49 This function lets you activate matplotlib interactive support
50 at any point during an IPython session. It does not import anything
51 into the interactive namespace.
52
53 If you are using the inline matplotlib backend in the IPython Notebook
54 you can set which figure formats are enabled using the following::
55
56 In [1]: from IPython.display import set_matplotlib_formats
57
58 In [2]: set_matplotlib_formats('pdf', 'svg')
59
60 The default for inline figures sets `bbox_inches` to 'tight'. This can
61 cause discrepancies between the displayed image and the identical
62 image created using `savefig`. This behavior can be disabled using the
63 `%config` magic::
64
65 In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
66
67 In addition, see the docstring of
68 `IPython.display.set_matplotlib_formats` and
69 `IPython.display.set_matplotlib_close` for more information on
70 changing additional behaviors of the inline backend.
71
72 Examples
73 --------
74 To enable the inline backend for usage with the IPython Notebook::
75
76 In [1]: %matplotlib inline
77
78 In this case, where the matplotlib default is TkAgg::
79
80 In [2]: %matplotlib
81 Using matplotlib backend: TkAgg
82
83 But you can explicitly request a different GUI backend::
84
85 In [3]: %matplotlib qt
86 """
87 args = magic_arguments.parse_argstring(self.matplotlib, line)
88 gui, backend = self.shell.enable_matplotlib(args.gui)
89 self._show_matplotlib_backend(args.gui, backend)
90
91 @skip_doctest
92 @line_magic
93 @magic_arguments.magic_arguments()
94 @magic_arguments.argument(
95 '--no-import-all', action='store_true', default=None,
96 help="""Prevent IPython from performing ``import *`` into the interactive namespace.
97
98 You can govern the default behavior of this flag with the
99 InteractiveShellApp.pylab_import_all configurable.
100 """
101 )
102 @magic_gui_arg
103 def pylab(self, line=''):
104 """Load numpy and matplotlib to work interactively.
105
106 This function lets you activate pylab (matplotlib, numpy and
107 interactive support) at any point during an IPython session.
108
109 %pylab makes the following imports::
110
111 import numpy
112 import matplotlib
113 from matplotlib import pylab, mlab, pyplot
114 np = numpy
115 plt = pyplot
116
117 from IPython.display import display
118 from IPython.core.pylabtools import figsize, getfigs
119
120 from pylab import *
121 from numpy import *
122
123 If you pass `--no-import-all`, the last two `*` imports will be excluded.
124
125 See the %matplotlib magic for more details about activating matplotlib
126 without affecting the interactive namespace.
127 """
128 args = magic_arguments.parse_argstring(self.pylab, line)
129 if args.no_import_all is None:
130 # get default from Application
131 if Application.initialized():
132 app = Application.instance()
133 try:
134 import_all = app.pylab_import_all
135 except AttributeError:
136 import_all = True
137 else:
138 # nothing specified, no app - default True
139 import_all = True
140 else:
141 # invert no-import flag
142 import_all = not args.no_import_all
143
144 gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
145 self._show_matplotlib_backend(args.gui, backend)
146 print ("Populating the interactive namespace from numpy and matplotlib")
147 if clobbered:
148 warn("pylab import has clobbered these variables: %s" % clobbered +
149 "\n`%matplotlib` prevents importing * from pylab and numpy"
150 )
151
152 def _show_matplotlib_backend(self, gui, backend):
153 """show matplotlib message backend message"""
154 if not gui or gui == 'auto':
155 print("Using matplotlib backend: %s" % backend)
156
[end of IPython/core/magics/pylab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/IPython/core/magics/pylab.py b/IPython/core/magics/pylab.py
--- a/IPython/core/magics/pylab.py
+++ b/IPython/core/magics/pylab.py
@@ -42,6 +42,8 @@
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
+ @magic_arguments.argument('-l', '--list', action='store_true',
+ help='Show available matplotlib backends')
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
@@ -83,10 +85,20 @@
But you can explicitly request a different GUI backend::
In [3]: %matplotlib qt
+
+ You can list the available backends using the -l/--list option
+
+ In [4]: %matplotlib --list
+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
+ 'gtk', 'tk', 'inline']
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
- gui, backend = self.shell.enable_matplotlib(args.gui)
- self._show_matplotlib_backend(args.gui, backend)
+ if args.list:
+ backends_list = list(backends.keys())
+ print("Available matplotlib backends: %s" % backends_list)
+ else:
+ gui, backend = self.shell.enable_matplotlib(args.gui)
+ self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
|
{"golden_diff": "diff --git a/IPython/core/magics/pylab.py b/IPython/core/magics/pylab.py\n--- a/IPython/core/magics/pylab.py\n+++ b/IPython/core/magics/pylab.py\n@@ -42,6 +42,8 @@\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n+ @magic_arguments.argument('-l', '--list', action='store_true',\n+ help='Show available matplotlib backends')\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n@@ -83,10 +85,20 @@\n But you can explicitly request a different GUI backend::\n \n In [3]: %matplotlib qt\n+\n+ You can list the available backends using the -l/--list option\n+\n+ In [4]: %matplotlib --list\n+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',\n+ 'gtk', 'tk', 'inline']\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n- gui, backend = self.shell.enable_matplotlib(args.gui)\n- self._show_matplotlib_backend(args.gui, backend)\n+ if args.list:\n+ backends_list = list(backends.keys())\n+ print(\"Available matplotlib backends: %s\" % backends_list)\n+ else:\n+ gui, backend = self.shell.enable_matplotlib(args.gui)\n+ self._show_matplotlib_backend(args.gui, backend)\n \n @skip_doctest\n @line_magic\n", "issue": "added %matplotlib --list so backends are easily visible\nlinked to #8444 adding --list argument to show the available backends\n\n", "before_files": [{"content": "\"\"\"Implementation of magic functions for matplotlib/pylab support.\n\"\"\"\nfrom __future__ import print_function\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 The IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Our own packages\nfrom traitlets.config.application import Application\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils.warn import warn\nfrom IPython.core.pylabtools import backends\n\n#-----------------------------------------------------------------------------\n# Magic implementation classes\n#-----------------------------------------------------------------------------\n\nmagic_gui_arg = magic_arguments.argument(\n 'gui', nargs='?',\n help=\"\"\"Name of the matplotlib backend to use %s.\n If given, the corresponding matplotlib backend is used,\n otherwise it will be matplotlib's default\n (which you can set in your matplotlib config file).\n \"\"\" % str(tuple(sorted(backends.keys())))\n)\n\n\n@magics_class\nclass PylabMagics(Magics):\n \"\"\"Magics related to matplotlib's pylab support\"\"\"\n \n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n \n This function lets you activate matplotlib interactive support\n at any point during an IPython session. 
It does not import anything\n into the interactive namespace.\n \n If you are using the inline matplotlib backend in the IPython Notebook\n you can set which figure formats are enabled using the following::\n \n In [1]: from IPython.display import set_matplotlib_formats\n \n In [2]: set_matplotlib_formats('pdf', 'svg')\n\n The default for inline figures sets `bbox_inches` to 'tight'. This can\n cause discrepancies between the displayed image and the identical\n image created using `savefig`. This behavior can be disabled using the\n `%config` magic::\n \n In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}\n\n In addition, see the docstring of\n `IPython.display.set_matplotlib_formats` and\n `IPython.display.set_matplotlib_close` for more information on\n changing additional behaviors of the inline backend.\n\n Examples\n --------\n To enable the inline backend for usage with the IPython Notebook::\n \n In [1]: %matplotlib inline\n\n In this case, where the matplotlib default is TkAgg::\n\n In [2]: %matplotlib\n Using matplotlib backend: TkAgg\n\n But you can explicitly request a different GUI backend::\n\n In [3]: %matplotlib qt\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n gui, backend = self.shell.enable_matplotlib(args.gui)\n self._show_matplotlib_backend(args.gui, backend)\n\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n '--no-import-all', action='store_true', default=None,\n help=\"\"\"Prevent IPython from performing ``import *`` into the interactive namespace.\n \n You can govern the default behavior of this flag with the\n InteractiveShellApp.pylab_import_all configurable.\n \"\"\"\n )\n @magic_gui_arg\n def pylab(self, line=''):\n \"\"\"Load numpy and matplotlib to work interactively.\n\n This function lets you activate pylab (matplotlib, numpy and\n interactive support) at any point during an IPython session.\n \n %pylab makes the following imports::\n \n import numpy\n import matplotlib\n from matplotlib import pylab, mlab, pyplot\n np = numpy\n plt = pyplot\n \n from IPython.display import display\n from IPython.core.pylabtools import figsize, getfigs\n \n from pylab import *\n from numpy import *\n\n If you pass `--no-import-all`, the last two `*` imports will be excluded.\n \n See the %matplotlib magic for more details about activating matplotlib\n without affecting the interactive namespace.\n \"\"\"\n args = magic_arguments.parse_argstring(self.pylab, line)\n if args.no_import_all is None:\n # get default from Application\n if Application.initialized():\n app = Application.instance()\n try:\n import_all = app.pylab_import_all\n except AttributeError:\n import_all = True\n else:\n # nothing specified, no app - default True\n import_all = True\n else:\n # invert no-import flag\n import_all = not args.no_import_all\n\n gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)\n self._show_matplotlib_backend(args.gui, backend)\n print (\"Populating the interactive namespace from numpy and matplotlib\")\n if clobbered:\n warn(\"pylab import has clobbered these variables: %s\" % clobbered +\n \"\\n`%matplotlib` prevents importing * from pylab and numpy\"\n )\n \n def _show_matplotlib_backend(self, gui, backend):\n \"\"\"show matplotlib message backend message\"\"\"\n if not gui or gui == 'auto':\n print(\"Using matplotlib backend: %s\" % backend)\n", "path": "IPython/core/magics/pylab.py"}]}
| 2,086 | 361 |
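The patch above wires a boolean `-l/--list` flag into the magic and branches on it before any backend is enabled. The same control flow in plain `argparse`, as a rough standalone sketch; the backend registry below is a placeholder, not the real mapping IPython derives from matplotlib:

```python
import argparse

# Placeholder registry; IPython builds the real one from matplotlib itself.
BACKENDS = {"inline": "module://ipykernel.pylab.backend_inline", "qt": "Qt5Agg", "tk": "TkAgg"}


def matplotlib_magic(line=""):
    parser = argparse.ArgumentParser(prog="%matplotlib")
    parser.add_argument("-l", "--list", action="store_true",
                        help="Show available matplotlib backends")
    parser.add_argument("gui", nargs="?", help="Name of the backend to enable")
    args = parser.parse_args(line.split())

    if args.list:
        # Listing short-circuits: no backend gets enabled.
        print("Available matplotlib backends: %s" % list(BACKENDS.keys()))
    else:
        print("Using matplotlib backend: %s" % BACKENDS.get(args.gui, "default"))


matplotlib_magic("--list")
matplotlib_magic("qt")
```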
gh_patches_debug_16253
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-3030
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken heroku deploy button
When I try to deploy Pontoon on Heroku, the deploy gets stuck loading forever.

I just clicked the deploy button from the README file and logged in to the Heroku platform.
</issue>
<code>
[start of pontoon/base/management/commands/heroku_deploy_setup.py]
1 import os
2
3 from urllib.parse import urlparse, urljoin
4
5 from django.core.management.base import BaseCommand
6 from django.contrib.sites.models import Site
7
8 from pontoon.base.models import Project, User
9
10
11 class Command(BaseCommand):
12 help = "Setup an instance of Pontoon deployed via Heroku Deploy."
13
14 def handle(self, *args, **options):
15 site_url = os.environ.get("SITE_URL")
16 app_host = urlparse(site_url).netloc
17 admin_email = os.environ.get("ADMIN_EMAIL")
18 admin_password = os.environ.get("ADMIN_PASSWORD")
19
20 User.objects.create_superuser(admin_email, admin_email, admin_password)
21 Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
22
23 Project.objects.filter(slug="pontoon-intro").update(
24 url=urljoin(site_url, "intro/")
25 )
26
[end of pontoon/base/management/commands/heroku_deploy_setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pontoon/base/management/commands/heroku_deploy_setup.py b/pontoon/base/management/commands/heroku_deploy_setup.py
--- a/pontoon/base/management/commands/heroku_deploy_setup.py
+++ b/pontoon/base/management/commands/heroku_deploy_setup.py
@@ -1,11 +1,11 @@
import os
-from urllib.parse import urlparse, urljoin
+from urllib.parse import urlparse
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
-from pontoon.base.models import Project, User
+from pontoon.base.models import User
class Command(BaseCommand):
@@ -19,7 +19,3 @@
User.objects.create_superuser(admin_email, admin_email, admin_password)
Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
-
- Project.objects.filter(slug="pontoon-intro").update(
- url=urljoin(site_url, "intro/")
- )
|
{"golden_diff": "diff --git a/pontoon/base/management/commands/heroku_deploy_setup.py b/pontoon/base/management/commands/heroku_deploy_setup.py\n--- a/pontoon/base/management/commands/heroku_deploy_setup.py\n+++ b/pontoon/base/management/commands/heroku_deploy_setup.py\n@@ -1,11 +1,11 @@\n import os\n \n-from urllib.parse import urlparse, urljoin\n+from urllib.parse import urlparse\n \n from django.core.management.base import BaseCommand\n from django.contrib.sites.models import Site\n \n-from pontoon.base.models import Project, User\n+from pontoon.base.models import User\n \n \n class Command(BaseCommand):\n@@ -19,7 +19,3 @@\n \n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n-\n- Project.objects.filter(slug=\"pontoon-intro\").update(\n- url=urljoin(site_url, \"intro/\")\n- )\n", "issue": "Broken heroku deploy button\nWhen i try to deploy pontoon on heroku, i got an infinite charge..\r\n\r\n\r\nI just clicked on button from README file and logged in to heroku platform\n", "before_files": [{"content": "import os\n\nfrom urllib.parse import urlparse, urljoin\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.sites.models import Site\n\nfrom pontoon.base.models import Project, User\n\n\nclass Command(BaseCommand):\n help = \"Setup an instance of Pontoon deployed via Heroku Deploy.\"\n\n def handle(self, *args, **options):\n site_url = os.environ.get(\"SITE_URL\")\n app_host = urlparse(site_url).netloc\n admin_email = os.environ.get(\"ADMIN_EMAIL\")\n admin_password = os.environ.get(\"ADMIN_PASSWORD\")\n\n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n\n Project.objects.filter(slug=\"pontoon-intro\").update(\n url=urljoin(site_url, \"intro/\")\n )\n", "path": "pontoon/base/management/commands/heroku_deploy_setup.py"}]}
| 882 | 214 |
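The patch above drops the `pontoon-intro` project update (and the now-unused imports) from the Heroku setup command, leaving only superuser creation and the Site update. The post-patch command body, reconstructed from the diff as a sketch:

```python
import os
from urllib.parse import urlparse

from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand

from pontoon.base.models import User


class Command(BaseCommand):
    help = "Setup an instance of Pontoon deployed via Heroku Deploy."

    def handle(self, *args, **options):
        site_url = os.environ.get("SITE_URL")
        app_host = urlparse(site_url).netloc
        admin_email = os.environ.get("ADMIN_EMAIL")
        admin_password = os.environ.get("ADMIN_PASSWORD")

        # Create the initial admin account and point the default Site at the app host.
        User.objects.create_superuser(admin_email, admin_email, admin_password)
        Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
```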
gh_patches_debug_6556
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-8679
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: Lambda execution doesn't work with Golang binary lambdas
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Running `awslocal lambda invoke --function-name "localStage-mediaStack-backfillGetFunction91FAA-b70d6394" "manual_ test_output.json"` even a single time causes LocalStack to continuously try and fail to run the lambda; the terminal hangs while the docker compose logs show:
```
localstack_main | 2023-06-07T16:47:15.160 INFO --- [ asgi_gw_4] localstack.request.aws : AWS sts.AssumeRole => 200
localstack_main | 2023-06-07T16:47:16.000 INFO --- [ asgi_gw_0] localstack.request.http : POST /_localstack_lambda/f1a13ad3d39e7c5f771f6cf85f78456d/status/f1a13ad3d39e7c5f771f6cf85f78456d/error => 202
localstack_main | 2023-06-07T16:47:16.378 WARN --- [ asgi_gw_2] l.s.a.i.executor_endpoint : Execution environment startup failed: {"errorMessage":"Error: fork/exec /var/task/bootstrap: no such file or directory","errorType":"Runtime.InvalidEntrypoint"}
```
The logs won't stop until I pull the container down and restart it (hence I know it's continuously retrying to no avail)
### Expected Behavior
The Lambda should execute just like it does on AWS, returning a sample string like "Test" (the lambda is very simple)
Even if it does fail (I'm not sure why it would fail here but not on AWS), it should retry a small, finite number of times and then stop
### How are you starting LocalStack?
With a docker-compose file (shown below)
### Steps To Reproduce
I don't think my code itself is the problem since it does work fine on AWS, but for a bit of context:
I use the CDK to compile the binaries and the infrastructure, which is deployed to LocalStack via:
cdklocal deploy -a "cdk.out/assembly-localStage/" --all --require-approval "never"
My infra:
https://github.com/KamWithK/exSTATic-backend/tree/master/infrastructure
I can potentially create a small isolated example which illustrates the problem if needed
### Environment
```markdown
version: "3.8"
services:
localstack:
container_name: "${LOCALSTACK_DOCKER_NAME-localstack_main}"
image: localstack/localstack:latest
ports:
- "127.0.0.1:4566:4566" # LocalStack Gateway
- "127.0.0.1:4510-4559:4510-4559" # external services port range
environment:
- DEBUG=${DEBUG-}
- DOCKER_HOST=unix:///var/run/docker.sock
- PERSISTENCE=/tmp/localstack/data
- AWS_DEFAULT_REGION=ap-southeast-2
volumes:
- "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
- "/var/run/docker.sock:/var/run/docker.sock"
healthcheck:
test: curl http://localhost:4566/_localstack/health
interval: 1s
timeout: 1s
retries: 10
```
### Anything else?
Here's some function info:
```
{
"FunctionName": "localStage-mediaStack-backfillGetFunction91FAA-b70d6394",
"FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:localStage-mediaStack-backfillGetFunction91FAA-b70d6394",
"Runtime": "provided.al2",
"Role": "arn:aws:iam::000000000000:role/localStage-mediaStack-backfillGetFunctionServi-f0162982",
"Handler": "bootstrap",
"CodeSize": 14604,
"Description": "",
"Timeout": 3,
"MemorySize": 128,
"LastModified": "2023-06-07T15:58:33.766076+0000",
"CodeSha256": "nqvy9NYt9j59ura5fEUE4QaeXOTuTiEsDNgEvsSQGdk=",
"Version": "$LATEST",
"TracingConfig": {
"Mode": "PassThrough"
},
"RevisionId": "377a6465-e32b-4ed3-a9f1-396af7ee0d86",
"PackageType": "Zip",
"Architectures": [
"x86_64"
],
"EphemeralStorage": {
"Size": 512
},
"SnapStart": {
"ApplyOn": "None",
"OptimizationStatus": "Off"
}
}
```
Test lambda:
```golang
package main
import (
"github.com/aws/aws-lambda-go/lambda"
)
func HandleRequest() (string, error) {
return "Test", nil
}
func main() {
lambda.Start(HandleRequest)
}
```
I did find this issue which sounded similar at first, but in my case the code does run on AWS (so I think it's a different problem, put here for reference though):
https://github.com/localstack/localstack/issues/4216
Any help would be greatly appreciated!
</issue>
<code>
[start of localstack/services/awslambda/packages.py]
1 import os
2 import platform
3 import stat
4 from typing import List
5
6 from localstack import config
7 from localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller
8 from localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException
9 from localstack.utils.platform import get_arch
10
11 LAMBDA_RUNTIME_INIT_URL = "https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}"
12
13 LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.18-pre"
14 LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION
15
16 # GO Lambda runtime
17 GO_RUNTIME_VERSION = "0.4.0"
18 GO_RUNTIME_DOWNLOAD_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz"
19
20
21 class AWSLambdaRuntimePackage(Package):
22 def __init__(self, default_version: str = LAMBDA_RUNTIME_VERSION):
23 super().__init__(name="AwsLambda", default_version=default_version)
24
25 def get_versions(self) -> List[str]:
26 return [LAMBDA_RUNTIME_VERSION]
27
28 def _get_installer(self, version: str) -> PackageInstaller:
29 return AWSLambdaRuntimePackageInstaller(name="awslambda-runtime", version=version)
30
31
32 class AWSLambdaRuntimePackageInstaller(DownloadInstaller):
33 def _get_arch(self):
34 arch = get_arch()
35 return "x86_64" if arch == "amd64" else arch
36
37 def _get_download_url(self) -> str:
38 arch = self._get_arch()
39 return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)
40
41 def _get_install_dir(self, target: InstallTarget) -> str:
42 install_dir = super()._get_install_dir(target)
43 arch = self._get_arch()
44 return os.path.join(install_dir, arch)
45
46 def _get_install_marker_path(self, install_dir: str) -> str:
47 return os.path.join(install_dir, "var", "rapid", "init")
48
49 def _install(self, target: InstallTarget) -> None:
50 super()._install(target)
51 install_location = self.get_executable_path()
52 st = os.stat(install_location)
53 os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
54
55
56 class AWSLambdaGoRuntimePackage(Package):
57 def __init__(self, default_version: str = GO_RUNTIME_VERSION):
58 super().__init__(name="AwsLambdaGo", default_version=default_version)
59
60 def get_versions(self) -> List[str]:
61 return [GO_RUNTIME_VERSION]
62
63 def _get_installer(self, version: str) -> PackageInstaller:
64 return AWSLambdaGoRuntimePackageInstaller(name="awslamba-go-runtime", version=version)
65
66
67 class AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):
68 def _get_download_url(self) -> str:
69 system = platform.system().lower()
70 arch = get_arch()
71
72 if system not in ["linux"]:
73 raise SystemNotSupportedException(f"Unsupported os {system} for awslambda-go-runtime")
74 if arch not in ["amd64", "arm64"]:
75 raise SystemNotSupportedException(f"Unsupported arch {arch} for awslambda-go-runtime")
76
77 return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(
78 version=GO_RUNTIME_VERSION,
79 os=system,
80 arch=arch,
81 )
82
83 def _get_install_marker_path(self, install_dir: str) -> str:
84 return os.path.join(install_dir, "aws-lambda-mock")
85
86 def _install(self, target: InstallTarget) -> None:
87 super()._install(target)
88
89 install_dir = self._get_install_dir(target)
90 install_location = self._get_install_marker_path(install_dir)
91 st = os.stat(install_location)
92 os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
93
94 go_lambda_mockserver = os.path.join(install_dir, "mockserver")
95 st = os.stat(go_lambda_mockserver)
96 os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
97
98
99 # version of the Maven dependency with Java utility code
100 LOCALSTACK_MAVEN_VERSION = "0.2.21"
101 MAVEN_REPO_URL = "https://repo1.maven.org/maven2"
102 URL_LOCALSTACK_FAT_JAR = (
103 "{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar"
104 )
105
106
107 class AWSLambdaJavaPackage(Package):
108 def __init__(self):
109 super().__init__("LambdaJavaLibs", "0.2.22")
110
111 def get_versions(self) -> List[str]:
112 return ["0.2.22", "0.2.21"]
113
114 def _get_installer(self, version: str) -> PackageInstaller:
115 return AWSLambdaJavaPackageInstaller("lambda-java-libs", version)
116
117
118 class AWSLambdaJavaPackageInstaller(DownloadInstaller):
119 def _get_download_url(self) -> str:
120 return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)
121
122
123 awslambda_runtime_package = AWSLambdaRuntimePackage()
124 awslambda_go_runtime_package = AWSLambdaGoRuntimePackage()
125 lambda_java_libs_package = AWSLambdaJavaPackage()
126
[end of localstack/services/awslambda/packages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/localstack/services/awslambda/packages.py b/localstack/services/awslambda/packages.py
--- a/localstack/services/awslambda/packages.py
+++ b/localstack/services/awslambda/packages.py
@@ -10,7 +10,7 @@
LAMBDA_RUNTIME_INIT_URL = "https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}"
-LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.18-pre"
+LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.19-pre"
LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION
# GO Lambda runtime
|
{"golden_diff": "diff --git a/localstack/services/awslambda/packages.py b/localstack/services/awslambda/packages.py\n--- a/localstack/services/awslambda/packages.py\n+++ b/localstack/services/awslambda/packages.py\n@@ -10,7 +10,7 @@\n \n LAMBDA_RUNTIME_INIT_URL = \"https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}\"\n \n-LAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.18-pre\"\n+LAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.19-pre\"\n LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION\n \n # GO Lambda runtime\n", "issue": "bug: Lambda execution doesn't work with Golang binary lambdas: <title>\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Current Behavior\r\n\r\nRunning `awslocal lambda invoke --function-name \"localStage-mediaStack-backfillGetFunction91FAA-b70d6394\" \"manual_ test_output.json\"` even a single time causes localstack to continuously try and fail to run the lambda, the terminal hands whilst docker compose logs output:\r\n```\r\nlocalstack_main | 2023-06-07T16:47:15.160 INFO --- [ asgi_gw_4] localstack.request.aws : AWS sts.AssumeRole => 200\r\nlocalstack_main | 2023-06-07T16:47:16.000 INFO --- [ asgi_gw_0] localstack.request.http : POST /_localstack_lambda/f1a13ad3d39e7c5f771f6cf85f78456d/status/f1a13ad3d39e7c5f771f6cf85f78456d/error => 202\r\nlocalstack_main | 2023-06-07T16:47:16.378 WARN --- [ asgi_gw_2] l.s.a.i.executor_endpoint : Execution environment startup failed: {\"errorMessage\":\"Error: fork/exec /var/task/bootstrap: no such file or directory\",\"errorType\":\"Runtime.InvalidEntrypoint\"}\r\n```\r\n\r\nThe logs won't stop until I pull the container down and restart it (hence I know it's continuously retrying to no avail)\r\n\r\n### Expected Behavior\r\n\r\nLambda should be executed just like it does on AWS, returning a sample return string like \"Test\" (the lambda is very simple)\r\nEven if it does fail (I'm not sure why it'd fail here but not on AWS) then it should try a small finite number of times and then stop\r\n\r\n### How are you starting LocalStack?\r\n\r\nWith a docker-compose file (shown below)\r\n\r\n### Steps To Reproduce\r\n\r\nI don't think my code itself is the problem since it does work fine on AWS, but for a bit of context:\r\nI use the cdk to compile the binaries and infrastructure which is deployed to LocalStack via:\r\n cdklocal deploy -a \"cdk.out/assembly-localStage/\" --all --require-approval \"never\"\r\n\r\nMy infra:\r\nhttps://github.com/KamWithK/exSTATic-backend/tree/master/infrastructure\r\n\r\nI can potentially create a small isolated example which illustrates the problem if needed\r\n\r\n### Environment\r\n\r\n```markdown\r\nversion: \"3.8\"\r\n\r\nservices:\r\n localstack:\r\n container_name: \"${LOCALSTACK_DOCKER_NAME-localstack_main}\"\r\n image: localstack/localstack:latest\r\n ports:\r\n - \"127.0.0.1:4566:4566\" # LocalStack Gateway\r\n - \"127.0.0.1:4510-4559:4510-4559\" # external services port range\r\n environment:\r\n - DEBUG=${DEBUG-}\r\n - DOCKER_HOST=unix:///var/run/docker.sock\r\n - PERSISTENCE=/tmp/localstack/data\r\n - AWS_DEFAULT_REGION=ap-southeast-2\r\n volumes:\r\n - \"${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack\"\r\n - \"/var/run/docker.sock:/var/run/docker.sock\"\r\n healthcheck:\r\n test: curl http://localhost:4566/_localstack/health\r\n interval: 1s\r\n timeout: 1s\r\n retries: 10\r\n```\r\n\r\n\r\n### Anything else?\r\n\r\nHere's some function info:\r\n```\r\n{\r\n 
\"FunctionName\": \"localStage-mediaStack-backfillGetFunction91FAA-b70d6394\",\r\n \"FunctionArn\": \"arn:aws:lambda:us-east-1:000000000000:function:localStage-mediaStack-backfillGetFunction91FAA-b70d6394\",\r\n \"Runtime\": \"provided.al2\",\r\n \"Role\": \"arn:aws:iam::000000000000:role/localStage-mediaStack-backfillGetFunctionServi-f0162982\",\r\n \"Handler\": \"bootstrap\",\r\n \"CodeSize\": 14604,\r\n \"Description\": \"\",\r\n \"Timeout\": 3,\r\n \"MemorySize\": 128,\r\n \"LastModified\": \"2023-06-07T15:58:33.766076+0000\",\r\n \"CodeSha256\": \"nqvy9NYt9j59ura5fEUE4QaeXOTuTiEsDNgEvsSQGdk=\",\r\n \"Version\": \"$LATEST\",\r\n \"TracingConfig\": {\r\n \"Mode\": \"PassThrough\"\r\n },\r\n \"RevisionId\": \"377a6465-e32b-4ed3-a9f1-396af7ee0d86\",\r\n \"PackageType\": \"Zip\",\r\n \"Architectures\": [\r\n \"x86_64\"\r\n ],\r\n \"EphemeralStorage\": {\r\n \"Size\": 512\r\n },\r\n \"SnapStart\": {\r\n \"ApplyOn\": \"None\",\r\n \"OptimizationStatus\": \"Off\"\r\n }\r\n }\r\n```\r\n\r\nTest lambda:\r\n```golang\r\npackage main\r\n\r\nimport (\r\n\t\"github.com/aws/aws-lambda-go/lambda\"\r\n)\r\n\r\nfunc HandleRequest() (string, error) {\r\n\treturn \"Test\", nil\r\n}\r\n\r\nfunc main() {\r\n\tlambda.Start(HandleRequest)\r\n}\r\n```\r\n\r\n\r\nI did find this issue which sounded similar at first, but in my case the code does run on AWS (so I think it's a different problem, put here for reference though):\r\nhttps://github.com/localstack/localstack/issues/4216\r\n\r\nAny help would be greatly appreciated!\n", "before_files": [{"content": "import os\nimport platform\nimport stat\nfrom typing import List\n\nfrom localstack import config\nfrom localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller\nfrom localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException\nfrom localstack.utils.platform import get_arch\n\nLAMBDA_RUNTIME_INIT_URL = \"https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}\"\n\nLAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.18-pre\"\nLAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION\n\n# GO Lambda runtime\nGO_RUNTIME_VERSION = \"0.4.0\"\nGO_RUNTIME_DOWNLOAD_URL_TEMPLATE = \"https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz\"\n\n\nclass AWSLambdaRuntimePackage(Package):\n def __init__(self, default_version: str = LAMBDA_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambda\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [LAMBDA_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaRuntimePackageInstaller(name=\"awslambda-runtime\", version=version)\n\n\nclass AWSLambdaRuntimePackageInstaller(DownloadInstaller):\n def _get_arch(self):\n arch = get_arch()\n return \"x86_64\" if arch == \"amd64\" else arch\n\n def _get_download_url(self) -> str:\n arch = self._get_arch()\n return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)\n\n def _get_install_dir(self, target: InstallTarget) -> str:\n install_dir = super()._get_install_dir(target)\n arch = self._get_arch()\n return os.path.join(install_dir, arch)\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"var\", \"rapid\", \"init\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n install_location = 
self.get_executable_path()\n st = os.stat(install_location)\n os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\nclass AWSLambdaGoRuntimePackage(Package):\n def __init__(self, default_version: str = GO_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambdaGo\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [GO_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaGoRuntimePackageInstaller(name=\"awslamba-go-runtime\", version=version)\n\n\nclass AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):\n def _get_download_url(self) -> str:\n system = platform.system().lower()\n arch = get_arch()\n\n if system not in [\"linux\"]:\n raise SystemNotSupportedException(f\"Unsupported os {system} for awslambda-go-runtime\")\n if arch not in [\"amd64\", \"arm64\"]:\n raise SystemNotSupportedException(f\"Unsupported arch {arch} for awslambda-go-runtime\")\n\n return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(\n version=GO_RUNTIME_VERSION,\n os=system,\n arch=arch,\n )\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"aws-lambda-mock\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n\n install_dir = self._get_install_dir(target)\n install_location = self._get_install_marker_path(install_dir)\n st = os.stat(install_location)\n os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n go_lambda_mockserver = os.path.join(install_dir, \"mockserver\")\n st = os.stat(go_lambda_mockserver)\n os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = \"0.2.21\"\nMAVEN_REPO_URL = \"https://repo1.maven.org/maven2\"\nURL_LOCALSTACK_FAT_JAR = (\n \"{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar\"\n)\n\n\nclass AWSLambdaJavaPackage(Package):\n def __init__(self):\n super().__init__(\"LambdaJavaLibs\", \"0.2.22\")\n\n def get_versions(self) -> List[str]:\n return [\"0.2.22\", \"0.2.21\"]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaJavaPackageInstaller(\"lambda-java-libs\", version)\n\n\nclass AWSLambdaJavaPackageInstaller(DownloadInstaller):\n def _get_download_url(self) -> str:\n return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)\n\n\nawslambda_runtime_package = AWSLambdaRuntimePackage()\nawslambda_go_runtime_package = AWSLambdaGoRuntimePackage()\nlambda_java_libs_package = AWSLambdaJavaPackage()\n", "path": "localstack/services/awslambda/packages.py"}]}
| 3,352 | 154 |
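The upstream fix here is simply the lambda-runtime-init version bump shown in the diff. Independently of that, a `Runtime.InvalidEntrypoint` / "fork/exec /var/task/bootstrap: no such file or directory" message usually points at a `provided.al2` package that has no executable `bootstrap` at its archive root; a quick local sanity check along these lines can rule that out (the zip path is just an example):

```python
import zipfile

ZIP_PATH = "function.zip"  # example path to the packaged Go lambda

with zipfile.ZipFile(ZIP_PATH) as zf:
    names = zf.namelist()
    if "bootstrap" not in names:
        # provided.al2 executes /var/task/bootstrap, so the binary must sit at
        # the archive root, not under a subdirectory such as bin/bootstrap.
        raise SystemExit(f"no top-level 'bootstrap' in {ZIP_PATH}: {names}")

    info = zf.getinfo("bootstrap")
    # The upper 16 bits of external_attr carry the Unix file mode on archives
    # built on Linux/macOS; check that the executable bit survived packaging.
    mode = info.external_attr >> 16
    if not mode & 0o111:
        raise SystemExit("'bootstrap' is present but not marked executable")

print("bootstrap found at archive root and executable")
```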
gh_patches_debug_13676
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-2408
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migration 0040_set_admin_is_staff.py is missing a dependency on the user model
</issue>
<code>
[start of pulpcore/app/migrations/0040_set_admin_is_staff.py]
1 # Generated by Django 2.2.13 on 2020-07-01 21:29
2
3 from django.contrib.auth import get_user_model
4 from django.db import migrations
5
6
7 def allow_admin_as_staff(apps, schema_editor):
8 user_model = get_user_model()
9 try:
10 admin_user = user_model.objects.get(username='admin')
11 except user_model.DoesNotExist:
12 pass
13 else:
14 admin_user.is_staff = True
15 admin_user.save()
16
17
18 class Migration(migrations.Migration):
19
20 dependencies = [
21 ('core', '0039_change_download_concurrency'),
22 ]
23
24 operations = [
25 migrations.RunPython(allow_admin_as_staff),
26 ]
27
[end of pulpcore/app/migrations/0040_set_admin_is_staff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py
--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py
+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py
@@ -1,5 +1,6 @@
# Generated by Django 2.2.13 on 2020-07-01 21:29
+from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations
@@ -19,8 +20,9 @@
dependencies = [
('core', '0039_change_download_concurrency'),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
- migrations.RunPython(allow_admin_as_staff),
+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),
]
|
{"golden_diff": "diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py\n--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py\n+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py\n@@ -1,5 +1,6 @@\n # Generated by Django 2.2.13 on 2020-07-01 21:29\n \n+from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.db import migrations\n \n@@ -19,8 +20,9 @@\n \n dependencies = [\n ('core', '0039_change_download_concurrency'),\n+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n \n operations = [\n- migrations.RunPython(allow_admin_as_staff),\n+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),\n ]\n", "issue": "Migration 0040_set_admin_is_staff.py is missing dependency on user model\n\n", "before_files": [{"content": "# Generated by Django 2.2.13 on 2020-07-01 21:29\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import migrations\n\n\ndef allow_admin_as_staff(apps, schema_editor):\n user_model = get_user_model()\n try:\n admin_user = user_model.objects.get(username='admin')\n except user_model.DoesNotExist:\n pass\n else:\n admin_user.is_staff = True\n admin_user.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0039_change_download_concurrency'),\n ]\n\n operations = [\n migrations.RunPython(allow_admin_as_staff),\n ]\n", "path": "pulpcore/app/migrations/0040_set_admin_is_staff.py"}]}
| 772 | 224 |
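The pattern in the fix above generalizes to any data migration that touches `get_user_model()`: declare the swappable user model as a dependency so Django orders the migration after the user table exists, and give `RunPython` a no-op reverse so it stays reversible. A minimal sketch; the app label, prior migration name, and function name are placeholders:

```python
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations


def promote_admin(apps, schema_editor):
    # Mark the bootstrap admin account as staff, if it exists.
    user_model = get_user_model()
    user_model.objects.filter(username="admin").update(is_staff=True)


class Migration(migrations.Migration):

    dependencies = [
        ("myapp", "0001_initial"),
        # Ensures this runs after whichever model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.RunPython(
            promote_admin,
            reverse_code=migrations.RunPython.noop,
            elidable=True,
        ),
    ]
```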
gh_patches_debug_3692
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-566
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`nautobot.core.api.serializers.WritableNestedSerializer` docstring is incorrect and confusing
<!--
NOTE: This template is for use by maintainers only. Please do not submit
an issue using this template unless you have been specifically asked to
do so.
-->
### Proposed Changes
The docstring should read something like this:
> Returns a nested representation of an object on read, but accepts either the nested representation or just the PK value on write operations.
<!-- Provide justification for the proposed change(s). -->
### Justification
The current docstring is in conflict with the actual implementation by stating that only the PK value is supported on write, which is incorrect.
</issue>
<code>
[start of nautobot/core/api/serializers.py]
1 import uuid
2
3 from django.core.exceptions import (
4 FieldError,
5 MultipleObjectsReturned,
6 ObjectDoesNotExist,
7 )
8 from django.db.models import AutoField, ManyToManyField
9 from drf_yasg.utils import swagger_serializer_method
10 from rest_framework import serializers
11 from rest_framework.exceptions import ValidationError
12
13 from nautobot.utilities.utils import dict_to_filter_params
14
15
16 class BaseModelSerializer(serializers.ModelSerializer):
17 """
18 This base serializer implements common fields and logic for all ModelSerializers.
19 Namely it defines the `display` field which exposes a human friendly value for the given object.
20 """
21
22 display = serializers.SerializerMethodField(read_only=True, help_text="Human friendly display value")
23
24 @swagger_serializer_method(serializer_or_field=serializers.CharField)
25 def get_display(self, instance):
26 """
27 Return either the `display` property of the instance or `str(instance)`
28 """
29 return getattr(instance, "display", str(instance))
30
31 def get_field_names(self, declared_fields, info):
32 """
33 Override get_field_names() to append the `display` field so it is always included in the
34 serializer's `Meta.fields`.
35
36 DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields
37 on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared
38 fields from the super. This means either we intercept and append the display field at this level, or
39 enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`
40 which would surely lead to errors of omission; therefore we have chosen the former approach.
41 """
42 fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple
43 fields.append("display")
44
45 return fields
46
47
48 class ValidatedModelSerializer(BaseModelSerializer):
49 """
50 Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during
51 validation. (DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)
52 """
53
54 def validate(self, data):
55
56 # Remove custom fields data and tags (if any) prior to model validation
57 attrs = data.copy()
58 attrs.pop("custom_fields", None)
59 attrs.pop("tags", None)
60
61 # Skip ManyToManyFields
62 for field in self.Meta.model._meta.get_fields():
63 if isinstance(field, ManyToManyField):
64 attrs.pop(field.name, None)
65
66 # Run clean() on an instance of the model
67 if self.instance is None:
68 instance = self.Meta.model(**attrs)
69 else:
70 instance = self.instance
71 for k, v in attrs.items():
72 setattr(instance, k, v)
73 instance.full_clean()
74
75 return data
76
77
78 class WritableNestedSerializer(BaseModelSerializer):
79 """
80 Returns a nested representation of an object on read, but accepts only a primary key on write.
81 """
82
83 def to_internal_value(self, data):
84
85 if data is None:
86 return None
87
88 # Dictionary of related object attributes
89 if isinstance(data, dict):
90 params = dict_to_filter_params(data)
91 queryset = self.Meta.model.objects
92 try:
93 return queryset.get(**params)
94 except ObjectDoesNotExist:
95 raise ValidationError("Related object not found using the provided attributes: {}".format(params))
96 except MultipleObjectsReturned:
97 raise ValidationError("Multiple objects match the provided attributes: {}".format(params))
98 except FieldError as e:
99 raise ValidationError(e)
100
101 queryset = self.Meta.model.objects
102 pk = None
103
104 if isinstance(self.Meta.model._meta.pk, AutoField):
105 # PK is an int for this model. This is usually the User model
106 try:
107 pk = int(data)
108 except (TypeError, ValueError):
109 raise ValidationError(
110 "Related objects must be referenced by ID or by dictionary of attributes. Received an "
111 "unrecognized value: {}".format(data)
112 )
113
114 else:
115 # We assume a type of UUIDField for all other models
116
117 # PK of related object
118 try:
119 # Ensure the pk is a valid UUID
120 pk = uuid.UUID(str(data))
121 except (TypeError, ValueError):
122 raise ValidationError(
123 "Related objects must be referenced by ID or by dictionary of attributes. Received an "
124 "unrecognized value: {}".format(data)
125 )
126
127 try:
128 return queryset.get(pk=pk)
129 except ObjectDoesNotExist:
130 raise ValidationError("Related object not found using the provided ID: {}".format(pk))
131
132
133 class BulkOperationSerializer(serializers.Serializer):
134 id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model
135
136
137 #
138 # GraphQL, used by the openapi doc, not by the view
139 #
140
141
142 class GraphQLAPISerializer(serializers.Serializer):
143 query = serializers.CharField(required=True, help_text="GraphQL query")
144 variables = serializers.JSONField(required=False, help_text="Variables in JSON Format")
145
[end of nautobot/core/api/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py
--- a/nautobot/core/api/serializers.py
+++ b/nautobot/core/api/serializers.py
@@ -77,7 +77,8 @@
class WritableNestedSerializer(BaseModelSerializer):
"""
- Returns a nested representation of an object on read, but accepts only a primary key on write.
+ Returns a nested representation of an object on read, but accepts either the nested representation or the
+ primary key value on write operations.
"""
def to_internal_value(self, data):
|
{"golden_diff": "diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py\n--- a/nautobot/core/api/serializers.py\n+++ b/nautobot/core/api/serializers.py\n@@ -77,7 +77,8 @@\n \n class WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n- Returns a nested representation of an object on read, but accepts only a primary key on write.\n+ Returns a nested representation of an object on read, but accepts either the nested representation or the\n+ primary key value on write operations.\n \"\"\"\n \n def to_internal_value(self, data):\n", "issue": "`nautobot.core.api.serializers.WritableNestedSerializer` docstring is incorrect and confusing\n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\nThe docstring should read something like this:\r\n> Returns a nested representation of an object on read, but accepts either the nested representation or just the PK value on write operations.\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\nThe current docstring is in conflict with the actual implementation by stating that only the PK value is supported on write, which is incorrect.\n", "before_files": [{"content": "import uuid\n\nfrom django.core.exceptions import (\n FieldError,\n MultipleObjectsReturned,\n ObjectDoesNotExist,\n)\nfrom django.db.models import AutoField, ManyToManyField\nfrom drf_yasg.utils import swagger_serializer_method\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom nautobot.utilities.utils import dict_to_filter_params\n\n\nclass BaseModelSerializer(serializers.ModelSerializer):\n \"\"\"\n This base serializer implements common fields and logic for all ModelSerializers.\n Namely it defines the `display` field which exposes a human friendly value for the given object.\n \"\"\"\n\n display = serializers.SerializerMethodField(read_only=True, help_text=\"Human friendly display value\")\n\n @swagger_serializer_method(serializer_or_field=serializers.CharField)\n def get_display(self, instance):\n \"\"\"\n Return either the `display` property of the instance or `str(instance)`\n \"\"\"\n return getattr(instance, \"display\", str(instance))\n\n def get_field_names(self, declared_fields, info):\n \"\"\"\n Override get_field_names() to append the `display` field so it is always included in the\n serializer's `Meta.fields`.\n\n DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields\n on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared\n fields from the super. This means either we intercept and append the display field at this level, or\n enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`\n which would surely lead to errors of omission; therefore we have chosen the former approach.\n \"\"\"\n fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple\n fields.append(\"display\")\n\n return fields\n\n\nclass ValidatedModelSerializer(BaseModelSerializer):\n \"\"\"\n Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during\n validation. 
(DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)\n \"\"\"\n\n def validate(self, data):\n\n # Remove custom fields data and tags (if any) prior to model validation\n attrs = data.copy()\n attrs.pop(\"custom_fields\", None)\n attrs.pop(\"tags\", None)\n\n # Skip ManyToManyFields\n for field in self.Meta.model._meta.get_fields():\n if isinstance(field, ManyToManyField):\n attrs.pop(field.name, None)\n\n # Run clean() on an instance of the model\n if self.instance is None:\n instance = self.Meta.model(**attrs)\n else:\n instance = self.instance\n for k, v in attrs.items():\n setattr(instance, k, v)\n instance.full_clean()\n\n return data\n\n\nclass WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n Returns a nested representation of an object on read, but accepts only a primary key on write.\n \"\"\"\n\n def to_internal_value(self, data):\n\n if data is None:\n return None\n\n # Dictionary of related object attributes\n if isinstance(data, dict):\n params = dict_to_filter_params(data)\n queryset = self.Meta.model.objects\n try:\n return queryset.get(**params)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided attributes: {}\".format(params))\n except MultipleObjectsReturned:\n raise ValidationError(\"Multiple objects match the provided attributes: {}\".format(params))\n except FieldError as e:\n raise ValidationError(e)\n\n queryset = self.Meta.model.objects\n pk = None\n\n if isinstance(self.Meta.model._meta.pk, AutoField):\n # PK is an int for this model. This is usually the User model\n try:\n pk = int(data)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n else:\n # We assume a type of UUIDField for all other models\n\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n pk = uuid.UUID(str(data))\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n try:\n return queryset.get(pk=pk)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided ID: {}\".format(pk))\n\n\nclass BulkOperationSerializer(serializers.Serializer):\n id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model\n\n\n#\n# GraphQL, used by the openapi doc, not by the view\n#\n\n\nclass GraphQLAPISerializer(serializers.Serializer):\n query = serializers.CharField(required=True, help_text=\"GraphQL query\")\n variables = serializers.JSONField(required=False, help_text=\"Variables in JSON Format\")\n", "path": "nautobot/core/api/serializers.py"}]}
| 2,078 | 135 |
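
A quick illustration of the behavior documented by the docstring fix in the entry above: a field backed by `WritableNestedSerializer` returns the nested object on read, while on write its `to_internal_value()` accepts either a primary-key value or a dictionary of identifying attributes. The payloads below are only a sketch; the `site` field name, the UUID, and the `slug` lookup are assumed placeholders, not values from the entry.

```python
# Two equivalent ways a client may reference a related object on write,
# mirroring the branches of to_internal_value() shown in the entry above.

# 1. By primary key (a UUID string for most models, an integer for e.g. User):
payload_by_pk = {
    "name": "core-switch-01",
    "site": "0f9b6c2a-1c9e-4a8e-9a43-1f2d3c4b5a6e",  # assumed UUID of an existing object
}

# 2. By a dictionary of attributes, resolved with dict_to_filter_params() and queryset.get():
payload_by_attrs = {
    "name": "core-switch-01",
    "site": {"slug": "ams01"},  # assumed unique lookup; ambiguous matches raise ValidationError
}
```
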
gh_patches_debug_4
|
rasdani/github-patches
|
git_diff
|
horovod__horovod-1139
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Replace .step(synchronize=False) with optimizer.skip_synchronize()
NVIDIA AMP does not support passing additional flags to `optimizer.step()`, such as `optimizer.step(synchronize=False)`.
This PR switches API to use context manager:
```python
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
```
</issue>
<code>
[start of horovod/__init__.py]
1 __version__ = '0.16.3'
2
[end of horovod/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/horovod/__init__.py b/horovod/__init__.py
--- a/horovod/__init__.py
+++ b/horovod/__init__.py
@@ -1 +1 @@
-__version__ = '0.16.3'
+__version__ = '0.16.4'
|
{"golden_diff": "diff --git a/horovod/__init__.py b/horovod/__init__.py\n--- a/horovod/__init__.py\n+++ b/horovod/__init__.py\n@@ -1 +1 @@\n-__version__ = '0.16.3'\n+__version__ = '0.16.4'\n", "issue": "Replace .step(synchronize=False) with optimizer.skip_synchronize()\nNVIDIA AMP does not support passing additional flags to `optimizer.step()`, such as `optimizer.step(synchronize=False)`.\r\n\r\nThis PR switches API to use context manager:\r\n```python\r\noptimizer.synchronize()\r\nwith optimizer.skip_synchronize():\r\n optimizer.step()\r\n```\n", "before_files": [{"content": "__version__ = '0.16.3'\n", "path": "horovod/__init__.py"}]}
| 619 | 76 |
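
As a usage sketch of the context-manager API proposed in the horovod entry above: the training-loop names (`model`, `criterion`, `batch`, `target`) and the helper functions themselves are assumptions, and only the `synchronize()` / `skip_synchronize()` pattern comes from the issue text.

```python
import horovod.torch as hvd


def make_distributed(optimizer, model):
    """Wrap a plain torch optimizer; assumes hvd.init() was called earlier."""
    return hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())


def train_step(model, optimizer, criterion, batch, target):
    """One step with an optimizer returned by make_distributed()."""
    optimizer.zero_grad()
    loss = criterion(model(batch), target)
    loss.backward()
    optimizer.synchronize()              # finish the gradient allreduce explicitly
    with optimizer.skip_synchronize():   # then apply the update without a second allreduce
        optimizer.step()
    return loss
```

Compared with the older `optimizer.step(synchronize=False)` keyword, the context manager keeps the signature of `step()` unchanged, which is what lets wrappers such as NVIDIA AMP call it without knowing about Horovod.
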
gh_patches_debug_30797
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-1567
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update our SOCKS documentation
- `socks4a://` is useless, all DNS queries are resolved client-side for SOCKS4.
- `socks5://` should have a warning about DNS queries being resolved client-side and that `socks5h://` is recommended for secure use (Should have been the default on our end).
- PySocks supports IPv6 connections to the SOCKS proxy.
- Need to check all our other assumptions in those docs.
</issue>
<code>
[start of src/urllib3/contrib/socks.py]
1 # -*- coding: utf-8 -*-
2 """
3 This module contains provisional support for SOCKS proxies from within
4 urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
5 SOCKS5. To enable its functionality, either install PySocks or install this
6 module with the ``socks`` extra.
7
8 The SOCKS implementation supports the full range of urllib3 features. It also
9 supports the following SOCKS features:
10
11 - SOCKS4
12 - SOCKS4a
13 - SOCKS5
14 - Usernames and passwords for the SOCKS proxy
15
16 Known Limitations:
17
18 - Currently PySocks does not support contacting remote websites via literal
19 IPv6 addresses. Any such connection attempt will fail. You must use a domain
20 name.
21 - Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
22 such connection attempt will fail.
23 """
24 from __future__ import absolute_import
25
26 try:
27 import socks
28 except ImportError:
29 import warnings
30 from ..exceptions import DependencyWarning
31
32 warnings.warn((
33 'SOCKS support in urllib3 requires the installation of optional '
34 'dependencies: specifically, PySocks. For more information, see '
35 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
36 ),
37 DependencyWarning
38 )
39 raise
40
41 from socket import error as SocketError, timeout as SocketTimeout
42
43 from ..connection import (
44 HTTPConnection, HTTPSConnection
45 )
46 from ..connectionpool import (
47 HTTPConnectionPool, HTTPSConnectionPool
48 )
49 from ..exceptions import ConnectTimeoutError, NewConnectionError
50 from ..poolmanager import PoolManager
51 from ..util.url import parse_url
52
53 try:
54 import ssl
55 except ImportError:
56 ssl = None
57
58
59 class SOCKSConnection(HTTPConnection):
60 """
61 A plain-text HTTP connection that connects via a SOCKS proxy.
62 """
63 def __init__(self, *args, **kwargs):
64 self._socks_options = kwargs.pop('_socks_options')
65 super(SOCKSConnection, self).__init__(*args, **kwargs)
66
67 def _new_conn(self):
68 """
69 Establish a new connection via the SOCKS proxy.
70 """
71 extra_kw = {}
72 if self.source_address:
73 extra_kw['source_address'] = self.source_address
74
75 if self.socket_options:
76 extra_kw['socket_options'] = self.socket_options
77
78 try:
79 conn = socks.create_connection(
80 (self.host, self.port),
81 proxy_type=self._socks_options['socks_version'],
82 proxy_addr=self._socks_options['proxy_host'],
83 proxy_port=self._socks_options['proxy_port'],
84 proxy_username=self._socks_options['username'],
85 proxy_password=self._socks_options['password'],
86 proxy_rdns=self._socks_options['rdns'],
87 timeout=self.timeout,
88 **extra_kw
89 )
90
91 except SocketTimeout:
92 raise ConnectTimeoutError(
93 self, "Connection to %s timed out. (connect timeout=%s)" %
94 (self.host, self.timeout))
95
96 except socks.ProxyError as e:
97 # This is fragile as hell, but it seems to be the only way to raise
98 # useful errors here.
99 if e.socket_err:
100 error = e.socket_err
101 if isinstance(error, SocketTimeout):
102 raise ConnectTimeoutError(
103 self,
104 "Connection to %s timed out. (connect timeout=%s)" %
105 (self.host, self.timeout)
106 )
107 else:
108 raise NewConnectionError(
109 self,
110 "Failed to establish a new connection: %s" % error
111 )
112 else:
113 raise NewConnectionError(
114 self,
115 "Failed to establish a new connection: %s" % e
116 )
117
118 except SocketError as e: # Defensive: PySocks should catch all these.
119 raise NewConnectionError(
120 self, "Failed to establish a new connection: %s" % e)
121
122 return conn
123
124
125 # We don't need to duplicate the Verified/Unverified distinction from
126 # urllib3/connection.py here because the HTTPSConnection will already have been
127 # correctly set to either the Verified or Unverified form by that module. This
128 # means the SOCKSHTTPSConnection will automatically be the correct type.
129 class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
130 pass
131
132
133 class SOCKSHTTPConnectionPool(HTTPConnectionPool):
134 ConnectionCls = SOCKSConnection
135
136
137 class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
138 ConnectionCls = SOCKSHTTPSConnection
139
140
141 class SOCKSProxyManager(PoolManager):
142 """
143 A version of the urllib3 ProxyManager that routes connections via the
144 defined SOCKS proxy.
145 """
146 pool_classes_by_scheme = {
147 'http': SOCKSHTTPConnectionPool,
148 'https': SOCKSHTTPSConnectionPool,
149 }
150
151 def __init__(self, proxy_url, username=None, password=None,
152 num_pools=10, headers=None, **connection_pool_kw):
153 parsed = parse_url(proxy_url)
154
155 if username is None and password is None and parsed.auth is not None:
156 split = parsed.auth.split(':')
157 if len(split) == 2:
158 username, password = split
159 if parsed.scheme == 'socks5':
160 socks_version = socks.PROXY_TYPE_SOCKS5
161 rdns = False
162 elif parsed.scheme == 'socks5h':
163 socks_version = socks.PROXY_TYPE_SOCKS5
164 rdns = True
165 elif parsed.scheme == 'socks4':
166 socks_version = socks.PROXY_TYPE_SOCKS4
167 rdns = False
168 elif parsed.scheme == 'socks4a':
169 socks_version = socks.PROXY_TYPE_SOCKS4
170 rdns = True
171 else:
172 raise ValueError(
173 "Unable to determine SOCKS version from %s" % proxy_url
174 )
175
176 self.proxy_url = proxy_url
177
178 socks_options = {
179 'socks_version': socks_version,
180 'proxy_host': parsed.host,
181 'proxy_port': parsed.port,
182 'username': username,
183 'password': password,
184 'rdns': rdns
185 }
186 connection_pool_kw['_socks_options'] = socks_options
187
188 super(SOCKSProxyManager, self).__init__(
189 num_pools, headers, **connection_pool_kw
190 )
191
192 self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
193
[end of src/urllib3/contrib/socks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/urllib3/contrib/socks.py b/src/urllib3/contrib/socks.py
--- a/src/urllib3/contrib/socks.py
+++ b/src/urllib3/contrib/socks.py
@@ -1,25 +1,38 @@
# -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
-- SOCKS4
-- SOCKS4a
-- SOCKS5
+- SOCKS4A (``proxy_url='socks4a://...``)
+- SOCKS4 (``proxy_url='socks4://...``)
+- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
+- SOCKS5 with local DNS (``proxy_url='socks5://...``)
- Usernames and passwords for the SOCKS proxy
-Known Limitations:
+ .. note::
+ It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
+ your ``proxy_url`` to ensure that DNS resolution is done from the remote
+ server instead of client-side when connecting to a domain name.
+
+SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
+supports IPv4, IPv6, and domain names.
+
+When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
+will be sent as the ``userid`` section of the SOCKS request::
+
+ proxy_url="socks4a://<userid>@proxy-host"
+
+When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
+of the ``proxy_url`` will be sent as the username/password to authenticate
+with the proxy::
+
+ proxy_url="socks5h://<username>:<password>@proxy-host"
-- Currently PySocks does not support contacting remote websites via literal
- IPv6 addresses. Any such connection attempt will fail. You must use a domain
- name.
-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
- such connection attempt will fail.
"""
from __future__ import absolute_import
|
{"golden_diff": "diff --git a/src/urllib3/contrib/socks.py b/src/urllib3/contrib/socks.py\n--- a/src/urllib3/contrib/socks.py\n+++ b/src/urllib3/contrib/socks.py\n@@ -1,25 +1,38 @@\n # -*- coding: utf-8 -*-\n \"\"\"\n This module contains provisional support for SOCKS proxies from within\n-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and\n+urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and\n SOCKS5. To enable its functionality, either install PySocks or install this\n module with the ``socks`` extra.\n \n The SOCKS implementation supports the full range of urllib3 features. It also\n supports the following SOCKS features:\n \n-- SOCKS4\n-- SOCKS4a\n-- SOCKS5\n+- SOCKS4A (``proxy_url='socks4a://...``)\n+- SOCKS4 (``proxy_url='socks4://...``)\n+- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)\n+- SOCKS5 with local DNS (``proxy_url='socks5://...``)\n - Usernames and passwords for the SOCKS proxy\n \n-Known Limitations:\n+ .. note::\n+ It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in\n+ your ``proxy_url`` to ensure that DNS resolution is done from the remote\n+ server instead of client-side when connecting to a domain name.\n+\n+SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5\n+supports IPv4, IPv6, and domain names.\n+\n+When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``\n+will be sent as the ``userid`` section of the SOCKS request::\n+\n+ proxy_url=\"socks4a://<userid>@proxy-host\"\n+\n+When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion\n+of the ``proxy_url`` will be sent as the username/password to authenticate\n+with the proxy::\n+\n+ proxy_url=\"socks5h://<username>:<password>@proxy-host\"\n \n-- Currently PySocks does not support contacting remote websites via literal\n- IPv6 addresses. Any such connection attempt will fail. You must use a domain\n- name.\n-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any\n- such connection attempt will fail.\n \"\"\"\n from __future__ import absolute_import\n", "issue": "Update our SOCKS documentation\n- `socks4a://` is useless, all DNS queries are resolved client-side for SOCKS4.\r\n- `socks5://` should have a warning about DNS queries being resolved client-side and that `socks5h://` is recommended for secure use (Should have been the default on our end).\r\n- PySocks supports IPv6 connections to the SOCKS proxy.\r\n- Need to check all our other assumptions in those docs.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains provisional support for SOCKS proxies from within\nurllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and\nSOCKS5. To enable its functionality, either install PySocks or install this\nmodule with the ``socks`` extra.\n\nThe SOCKS implementation supports the full range of urllib3 features. It also\nsupports the following SOCKS features:\n\n- SOCKS4\n- SOCKS4a\n- SOCKS5\n- Usernames and passwords for the SOCKS proxy\n\nKnown Limitations:\n\n- Currently PySocks does not support contacting remote websites via literal\n IPv6 addresses. Any such connection attempt will fail. You must use a domain\n name.\n- Currently PySocks does not support IPv6 connections to the SOCKS proxy. 
Any\n such connection attempt will fail.\n\"\"\"\nfrom __future__ import absolute_import\n\ntry:\n import socks\nexcept ImportError:\n import warnings\n from ..exceptions import DependencyWarning\n\n warnings.warn((\n 'SOCKS support in urllib3 requires the installation of optional '\n 'dependencies: specifically, PySocks. For more information, see '\n 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'\n ),\n DependencyWarning\n )\n raise\n\nfrom socket import error as SocketError, timeout as SocketTimeout\n\nfrom ..connection import (\n HTTPConnection, HTTPSConnection\n)\nfrom ..connectionpool import (\n HTTPConnectionPool, HTTPSConnectionPool\n)\nfrom ..exceptions import ConnectTimeoutError, NewConnectionError\nfrom ..poolmanager import PoolManager\nfrom ..util.url import parse_url\n\ntry:\n import ssl\nexcept ImportError:\n ssl = None\n\n\nclass SOCKSConnection(HTTPConnection):\n \"\"\"\n A plain-text HTTP connection that connects via a SOCKS proxy.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self._socks_options = kwargs.pop('_socks_options')\n super(SOCKSConnection, self).__init__(*args, **kwargs)\n\n def _new_conn(self):\n \"\"\"\n Establish a new connection via the SOCKS proxy.\n \"\"\"\n extra_kw = {}\n if self.source_address:\n extra_kw['source_address'] = self.source_address\n\n if self.socket_options:\n extra_kw['socket_options'] = self.socket_options\n\n try:\n conn = socks.create_connection(\n (self.host, self.port),\n proxy_type=self._socks_options['socks_version'],\n proxy_addr=self._socks_options['proxy_host'],\n proxy_port=self._socks_options['proxy_port'],\n proxy_username=self._socks_options['username'],\n proxy_password=self._socks_options['password'],\n proxy_rdns=self._socks_options['rdns'],\n timeout=self.timeout,\n **extra_kw\n )\n\n except SocketTimeout:\n raise ConnectTimeoutError(\n self, \"Connection to %s timed out. (connect timeout=%s)\" %\n (self.host, self.timeout))\n\n except socks.ProxyError as e:\n # This is fragile as hell, but it seems to be the only way to raise\n # useful errors here.\n if e.socket_err:\n error = e.socket_err\n if isinstance(error, SocketTimeout):\n raise ConnectTimeoutError(\n self,\n \"Connection to %s timed out. (connect timeout=%s)\" %\n (self.host, self.timeout)\n )\n else:\n raise NewConnectionError(\n self,\n \"Failed to establish a new connection: %s\" % error\n )\n else:\n raise NewConnectionError(\n self,\n \"Failed to establish a new connection: %s\" % e\n )\n\n except SocketError as e: # Defensive: PySocks should catch all these.\n raise NewConnectionError(\n self, \"Failed to establish a new connection: %s\" % e)\n\n return conn\n\n\n# We don't need to duplicate the Verified/Unverified distinction from\n# urllib3/connection.py here because the HTTPSConnection will already have been\n# correctly set to either the Verified or Unverified form by that module. 
This\n# means the SOCKSHTTPSConnection will automatically be the correct type.\nclass SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):\n pass\n\n\nclass SOCKSHTTPConnectionPool(HTTPConnectionPool):\n ConnectionCls = SOCKSConnection\n\n\nclass SOCKSHTTPSConnectionPool(HTTPSConnectionPool):\n ConnectionCls = SOCKSHTTPSConnection\n\n\nclass SOCKSProxyManager(PoolManager):\n \"\"\"\n A version of the urllib3 ProxyManager that routes connections via the\n defined SOCKS proxy.\n \"\"\"\n pool_classes_by_scheme = {\n 'http': SOCKSHTTPConnectionPool,\n 'https': SOCKSHTTPSConnectionPool,\n }\n\n def __init__(self, proxy_url, username=None, password=None,\n num_pools=10, headers=None, **connection_pool_kw):\n parsed = parse_url(proxy_url)\n\n if username is None and password is None and parsed.auth is not None:\n split = parsed.auth.split(':')\n if len(split) == 2:\n username, password = split\n if parsed.scheme == 'socks5':\n socks_version = socks.PROXY_TYPE_SOCKS5\n rdns = False\n elif parsed.scheme == 'socks5h':\n socks_version = socks.PROXY_TYPE_SOCKS5\n rdns = True\n elif parsed.scheme == 'socks4':\n socks_version = socks.PROXY_TYPE_SOCKS4\n rdns = False\n elif parsed.scheme == 'socks4a':\n socks_version = socks.PROXY_TYPE_SOCKS4\n rdns = True\n else:\n raise ValueError(\n \"Unable to determine SOCKS version from %s\" % proxy_url\n )\n\n self.proxy_url = proxy_url\n\n socks_options = {\n 'socks_version': socks_version,\n 'proxy_host': parsed.host,\n 'proxy_port': parsed.port,\n 'username': username,\n 'password': password,\n 'rdns': rdns\n }\n connection_pool_kw['_socks_options'] = socks_options\n\n super(SOCKSProxyManager, self).__init__(\n num_pools, headers, **connection_pool_kw\n )\n\n self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme\n", "path": "src/urllib3/contrib/socks.py"}]}
| 2,493 | 582 |
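
For context on the `socks5h` recommendation added by the urllib3 entry above, a minimal client-side sketch; the proxy address, credentials, and target URL are placeholders, not values from the entry.

```python
from urllib3.contrib.socks import SOCKSProxyManager

# socks5h:// asks the proxy to resolve hostnames, so DNS lookups are not done
# client-side; socks5:// would resolve them locally instead.
proxy = SOCKSProxyManager("socks5h://user:[email protected]:1080/")
response = proxy.request("GET", "https://example.com/")
print(response.status)
```
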
gh_patches_debug_19971
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-476
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding more documentation
Currently, we only have the API reference. There's no other documentation at the moment.
Here are a few references we could take inspiration from/copy.
- [@rougier's tutorial](http://www.loria.fr/~rougier/teaching/opengl/)
- Recipe from the IPython Cookbook (link coming soon)
- [Paper in Frontiers in Neuroinformatics](http://journal.frontiersin.org/Journal/10.3389/fninf.2013.00036/full)
</issue>
<code>
[start of vispy/__init__.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 """
6
7 =====
8 Vispy
9 =====
10
11 Vispy is a collaborative project that has the goal to allow more sharing
12 of code between visualization projects based on OpenGL. It does this
13 by providing powerful interfaces to OpenGL, at different levels of
14 abstraction and generality.
15
16 Vispy consists of the following modules:
17 * vispy.app: for creating windows, timers and mainloops for various backends
18 * vispy.gloo: Object oriented GL API
19 * vispy.gloo.gl: Low level OpenGL API
20 * vispy.util: various utilities
21 * vispy.scene: Higher level visualization objects (work in progress)
22 * vispy.mpl_plot: matplotlib interface (work in progress)
23 * ... more to come
24
25 Vispy comes with a powerful event system and a simple application
26 framework that works on multiple backends. This allows easy creation
27 of figures, and enables integrating visualizations in a GUI application.
28
29 For more information see http://vispy.org.
30 """
31
32 from __future__ import division
33
34 __all__ = ['use', 'sys_info', 'set_log_level', 'test']
35
36 # Definition of the version number
37 __version__ = '0.3'
38
39
40 from .util import (_parse_command_line_arguments, config, # noqa
41 set_log_level, keys, sys_info, test) # noqa
42 from .util.wrappers import use # noqa
43
44 _parse_command_line_arguments()
45
[end of vispy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vispy/__init__.py b/vispy/__init__.py
--- a/vispy/__init__.py
+++ b/vispy/__init__.py
@@ -8,25 +8,13 @@
Vispy
=====
-Vispy is a collaborative project that has the goal to allow more sharing
-of code between visualization projects based on OpenGL. It does this
-by providing powerful interfaces to OpenGL, at different levels of
-abstraction and generality.
-
-Vispy consists of the following modules:
- * vispy.app: for creating windows, timers and mainloops for various backends
- * vispy.gloo: Object oriented GL API
- * vispy.gloo.gl: Low level OpenGL API
- * vispy.util: various utilities
- * vispy.scene: Higher level visualization objects (work in progress)
- * vispy.mpl_plot: matplotlib interface (work in progress)
- * ... more to come
-
-Vispy comes with a powerful event system and a simple application
-framework that works on multiple backends. This allows easy creation
-of figures, and enables integrating visualizations in a GUI application.
-
-For more information see http://vispy.org.
+Vispy is a **high-performance interactive 2D/3D data visualization
+library**. Vispy leverages the computational power of modern **Graphics
+Processing Units (GPUs)** through the **OpenGL** library to display very
+large datasets.
+
+For more information, see http://vispy.org.
+
"""
from __future__ import division
|
{"golden_diff": "diff --git a/vispy/__init__.py b/vispy/__init__.py\n--- a/vispy/__init__.py\n+++ b/vispy/__init__.py\n@@ -8,25 +8,13 @@\n Vispy\n =====\n \n-Vispy is a collaborative project that has the goal to allow more sharing\n-of code between visualization projects based on OpenGL. It does this\n-by providing powerful interfaces to OpenGL, at different levels of\n-abstraction and generality.\n-\n-Vispy consists of the following modules:\n- * vispy.app: for creating windows, timers and mainloops for various backends\n- * vispy.gloo: Object oriented GL API\n- * vispy.gloo.gl: Low level OpenGL API\n- * vispy.util: various utilities\n- * vispy.scene: Higher level visualization objects (work in progress)\n- * vispy.mpl_plot: matplotlib interface (work in progress)\n- * ... more to come\n-\n-Vispy comes with a powerful event system and a simple application\n-framework that works on multiple backends. This allows easy creation\n-of figures, and enables integrating visualizations in a GUI application.\n-\n-For more information see http://vispy.org.\n+Vispy is a **high-performance interactive 2D/3D data visualization\n+library**. Vispy leverages the computational power of modern **Graphics\n+Processing Units (GPUs)** through the **OpenGL** library to display very\n+large datasets.\n+\n+For more information, see http://vispy.org.\n+\n \"\"\"\n \n from __future__ import division\n", "issue": "Adding more documentation\nCurrently, we only have the API reference. There's no other documentation at the moment.\n\nHere are a few references we could take inspiration from/copy.\n- [@rougier's tutorial](http://www.loria.fr/~rougier/teaching/opengl/)\n- Recipe from the IPython Cookbook (link coming soon)\n- [Paper in Frontiers in Neuroinformatics](http://journal.frontiersin.org/Journal/10.3389/fninf.2013.00036/full)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\n\n=====\nVispy\n=====\n\nVispy is a collaborative project that has the goal to allow more sharing\nof code between visualization projects based on OpenGL. It does this\nby providing powerful interfaces to OpenGL, at different levels of\nabstraction and generality.\n\nVispy consists of the following modules:\n * vispy.app: for creating windows, timers and mainloops for various backends\n * vispy.gloo: Object oriented GL API\n * vispy.gloo.gl: Low level OpenGL API\n * vispy.util: various utilities\n * vispy.scene: Higher level visualization objects (work in progress)\n * vispy.mpl_plot: matplotlib interface (work in progress)\n * ... more to come\n\nVispy comes with a powerful event system and a simple application\nframework that works on multiple backends. This allows easy creation\nof figures, and enables integrating visualizations in a GUI application.\n\nFor more information see http://vispy.org.\n\"\"\"\n\nfrom __future__ import division\n\n__all__ = ['use', 'sys_info', 'set_log_level', 'test']\n\n# Definition of the version number\n__version__ = '0.3'\n\n\nfrom .util import (_parse_command_line_arguments, config, # noqa\n set_log_level, keys, sys_info, test) # noqa\nfrom .util.wrappers import use # noqa\n\n_parse_command_line_arguments()\n", "path": "vispy/__init__.py"}]}
| 1,077 | 343 |
gh_patches_debug_11506
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-3169
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Errors when installed munkres is > 1.0.x
### Problem
When invoking beets, beets errors out on munkres versions newer than 1.0.x. This was introduced by https://github.com/beetbox/beets/pull/3146.
Running this command in verbose (`-vv`) mode:
```python-traceback
> beet -vv import .
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 583, in _build_master
ws.require(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 900, in require
needed = self.resolve(parse_requirements(requirements))
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 791, in resolve
raise VersionConflict(dist, req).with_context(dependent_req)
pkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/beet", line 6, in <module>
from pkg_resources import load_entry_point
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3191, in <module>
@_call_aside
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3175, in _call_aside
f(*args, **kwargs)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3204, in _initialize_master_working_set
working_set = WorkingSet._build_master()
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 585, in _build_master
return cls._build_from_requirements(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 598, in _build_from_requirements
dists = ws.resolve(reqs, Environment())
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 786, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets
```
### Setup
* OS: Linux (Arch Linux)
* Python version: 3.7.2
* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)
* Turning off plugins made problem go away (yes/no): Didn't try.
My configuration (output of `beet config`) is:
Not relevant.
Errors when installed munkres is > 1.0.x
### Problem
When invoking beets, beets errors out on munkres versions newer than 1.0.x. This was introduced by https://github.com/beetbox/beets/pull/3146.
Running this command in verbose (`-vv`) mode:
```python-traceback
> beet -vv import .
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 583, in _build_master
ws.require(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 900, in require
needed = self.resolve(parse_requirements(requirements))
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 791, in resolve
raise VersionConflict(dist, req).with_context(dependent_req)
pkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/beet", line 6, in <module>
from pkg_resources import load_entry_point
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3191, in <module>
@_call_aside
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3175, in _call_aside
f(*args, **kwargs)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3204, in _initialize_master_working_set
working_set = WorkingSet._build_master()
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 585, in _build_master
return cls._build_from_requirements(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 598, in _build_from_requirements
dists = ws.resolve(reqs, Environment())
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 786, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets
```
### Setup
* OS: Linux (Arch Linux)
* Python version: 3.7.2
* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)
* Turning off plugins made problem go away (yes/no): Didn't try.
My configuration (output of `beet config`) is:
Not relevant.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # This file is part of beets.
5 # Copyright 2016, Adrian Sampson.
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining
8 # a copy of this software and associated documentation files (the
9 # "Software"), to deal in the Software without restriction, including
10 # without limitation the rights to use, copy, modify, merge, publish,
11 # distribute, sublicense, and/or sell copies of the Software, and to
12 # permit persons to whom the Software is furnished to do so, subject to
13 # the following conditions:
14 #
15 # The above copyright notice and this permission notice shall be
16 # included in all copies or substantial portions of the Software.
17
18 from __future__ import division, absolute_import, print_function
19
20 import os
21 import sys
22 import subprocess
23 import shutil
24 from setuptools import setup
25
26
27 def _read(fn):
28 path = os.path.join(os.path.dirname(__file__), fn)
29 return open(path).read()
30
31
32 def build_manpages():
33 # Go into the docs directory and build the manpage.
34 docdir = os.path.join(os.path.dirname(__file__), 'docs')
35 curdir = os.getcwd()
36 os.chdir(docdir)
37 try:
38 subprocess.check_call(['make', 'man'])
39 except OSError:
40 print("Could not build manpages (make man failed)!", file=sys.stderr)
41 return
42 finally:
43 os.chdir(curdir)
44
45 # Copy resulting manpages.
46 mandir = os.path.join(os.path.dirname(__file__), 'man')
47 if os.path.exists(mandir):
48 shutil.rmtree(mandir)
49 shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)
50
51
52 # Build manpages if we're making a source distribution tarball.
53 if 'sdist' in sys.argv:
54 build_manpages()
55
56
57 setup(
58 name='beets',
59 version='1.4.8',
60 description='music tagger and library organizer',
61 author='Adrian Sampson',
62 author_email='[email protected]',
63 url='http://beets.io/',
64 license='MIT',
65 platforms='ALL',
66 long_description=_read('README.rst'),
67 test_suite='test.testall.suite',
68 include_package_data=True, # Install plugin resources.
69
70 packages=[
71 'beets',
72 'beets.ui',
73 'beets.autotag',
74 'beets.util',
75 'beets.dbcore',
76 'beetsplug',
77 'beetsplug.bpd',
78 'beetsplug.web',
79 'beetsplug.lastgenre',
80 'beetsplug.metasync',
81 ],
82 entry_points={
83 'console_scripts': [
84 'beet = beets.ui:main',
85 ],
86 },
87
88 install_requires=[
89 'six>=1.9',
90 'mutagen>=1.33',
91 'munkres~=1.0.0',
92 'unidecode',
93 'musicbrainzngs>=0.4',
94 'pyyaml',
95 ] + (
96 # Use the backport of Python 3.4's `enum` module.
97 ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []
98 ) + (
99 # Pin a Python 2-compatible version of Jellyfish.
100 ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']
101 ) + (
102 # Support for ANSI console colors on Windows.
103 ['colorama'] if (sys.platform == 'win32') else []
104 ),
105
106 tests_require=[
107 'beautifulsoup4',
108 'flask',
109 'mock',
110 'pylast',
111 'rarfile',
112 'responses',
113 'pyxdg',
114 'pathlib',
115 'python-mpd2',
116 'discogs-client'
117 ],
118
119 # Plugin (optional) dependencies:
120 extras_require={
121 'absubmit': ['requests'],
122 'fetchart': ['requests'],
123 'chroma': ['pyacoustid'],
124 'discogs': ['discogs-client>=2.2.1'],
125 'beatport': ['requests-oauthlib>=0.6.1'],
126 'lastgenre': ['pylast'],
127 'mpdstats': ['python-mpd2>=0.4.2'],
128 'web': ['flask', 'flask-cors'],
129 'import': ['rarfile'],
130 'thumbnails': ['pyxdg'] +
131 (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),
132 'metasync': ['dbus-python'],
133 },
134 # Non-Python/non-PyPI plugin dependencies:
135 # convert: ffmpeg
136 # bpd: python-gi and GStreamer
137 # absubmit: extractor binary from http://acousticbrainz.org/download
138
139 classifiers=[
140 'Topic :: Multimedia :: Sound/Audio',
141 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
142 'License :: OSI Approved :: MIT License',
143 'Environment :: Console',
144 'Environment :: Web Environment',
145 'Programming Language :: Python',
146 'Programming Language :: Python :: 2',
147 'Programming Language :: Python :: 2.7',
148 'Programming Language :: Python :: 3',
149 'Programming Language :: Python :: 3.4',
150 'Programming Language :: Python :: 3.5',
151 'Programming Language :: Python :: 3.6',
152 'Programming Language :: Python :: 3.7',
153 'Programming Language :: Python :: Implementation :: CPython',
154 ],
155 )
156
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -88,10 +88,14 @@
install_requires=[
'six>=1.9',
'mutagen>=1.33',
- 'munkres~=1.0.0',
'unidecode',
'musicbrainzngs>=0.4',
'pyyaml',
+ ] + [
+ # Avoid a version of munkres incompatible with Python 3.
+ 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else
+ 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else
+ 'munkres>=1.0.0',
] + (
# Use the backport of Python 3.4's `enum` module.
['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -88,10 +88,14 @@\n install_requires=[\n 'six>=1.9',\n 'mutagen>=1.33',\n- 'munkres~=1.0.0',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n+ ] + [\n+ # Avoid a version of munkres incompatible with Python 3.\n+ 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else\n+ 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else\n+ 'munkres>=1.0.0',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n", "issue": "Errors when installed munkres is > 1.0.x\n### Problem\r\n\r\nWhen invoking beets, beets errors out on munkres versions newer than 1.0.x. This was introduced by https://github.com/beetbox/beets/pull/3146.\r\n\r\nRunning this command in verbose (`-vv`) mode:\r\n\r\n```python-traceback\r\n> beet -vv import .\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 583, in _build_master\r\n ws.require(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 900, in require\r\n needed = self.resolve(parse_requirements(requirements))\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 791, in resolve\r\n raise VersionConflict(dist, req).with_context(dependent_req)\r\npkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/beet\", line 6, in <module>\r\n from pkg_resources import load_entry_point\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3191, in <module>\r\n @_call_aside\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3175, in _call_aside\r\n f(*args, **kwargs)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3204, in _initialize_master_working_set\r\n working_set = WorkingSet._build_master()\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 585, in _build_master\r\n return cls._build_from_requirements(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 598, in _build_from_requirements\r\n dists = ws.resolve(reqs, Environment())\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 786, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets\r\n```\r\n\r\n### Setup\r\n\r\n* OS: Linux (Arch Linux)\r\n* Python version: 3.7.2\r\n* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)\r\n* Turning off plugins made problem go away (yes/no): Didn't try.\r\n\r\nMy configuration (output of `beet config`) is:\r\n\r\nNot relevant.\nErrors when installed munkres is > 1.0.x\n### Problem\r\n\r\nWhen invoking beets, beets errors out on munkres versions newer than 1.0.x. 
This was introduced by https://github.com/beetbox/beets/pull/3146.\r\n\r\nRunning this command in verbose (`-vv`) mode:\r\n\r\n```python-traceback\r\n> beet -vv import .\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 583, in _build_master\r\n ws.require(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 900, in require\r\n needed = self.resolve(parse_requirements(requirements))\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 791, in resolve\r\n raise VersionConflict(dist, req).with_context(dependent_req)\r\npkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/beet\", line 6, in <module>\r\n from pkg_resources import load_entry_point\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3191, in <module>\r\n @_call_aside\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3175, in _call_aside\r\n f(*args, **kwargs)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3204, in _initialize_master_working_set\r\n working_set = WorkingSet._build_master()\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 585, in _build_master\r\n return cls._build_from_requirements(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 598, in _build_from_requirements\r\n dists = ws.resolve(reqs, Environment())\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 786, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets\r\n```\r\n\r\n### Setup\r\n\r\n* OS: Linux (Arch Linux)\r\n* Python version: 3.7.2\r\n* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)\r\n* Turning off plugins made problem go away (yes/no): Didn't try.\r\n\r\nMy configuration (output of `beet config`) is:\r\n\r\nNot relevant.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), 'docs')\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call(['make', 'man'])\n 
except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), 'man')\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif 'sdist' in sys.argv:\n build_manpages()\n\n\nsetup(\n name='beets',\n version='1.4.8',\n description='music tagger and library organizer',\n author='Adrian Sampson',\n author_email='[email protected]',\n url='http://beets.io/',\n license='MIT',\n platforms='ALL',\n long_description=_read('README.rst'),\n test_suite='test.testall.suite',\n include_package_data=True, # Install plugin resources.\n\n packages=[\n 'beets',\n 'beets.ui',\n 'beets.autotag',\n 'beets.util',\n 'beets.dbcore',\n 'beetsplug',\n 'beetsplug.bpd',\n 'beetsplug.web',\n 'beetsplug.lastgenre',\n 'beetsplug.metasync',\n ],\n entry_points={\n 'console_scripts': [\n 'beet = beets.ui:main',\n ],\n },\n\n install_requires=[\n 'six>=1.9',\n 'mutagen>=1.33',\n 'munkres~=1.0.0',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n ) + (\n # Pin a Python 2-compatible version of Jellyfish.\n ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']\n ) + (\n # Support for ANSI console colors on Windows.\n ['colorama'] if (sys.platform == 'win32') else []\n ),\n\n tests_require=[\n 'beautifulsoup4',\n 'flask',\n 'mock',\n 'pylast',\n 'rarfile',\n 'responses',\n 'pyxdg',\n 'pathlib',\n 'python-mpd2',\n 'discogs-client'\n ],\n\n # Plugin (optional) dependencies:\n extras_require={\n 'absubmit': ['requests'],\n 'fetchart': ['requests'],\n 'chroma': ['pyacoustid'],\n 'discogs': ['discogs-client>=2.2.1'],\n 'beatport': ['requests-oauthlib>=0.6.1'],\n 'lastgenre': ['pylast'],\n 'mpdstats': ['python-mpd2>=0.4.2'],\n 'web': ['flask', 'flask-cors'],\n 'import': ['rarfile'],\n 'thumbnails': ['pyxdg'] +\n (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),\n 'metasync': ['dbus-python'],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # convert: ffmpeg\n # bpd: python-gi and GStreamer\n # absubmit: extractor binary from http://acousticbrainz.org/download\n\n classifiers=[\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n", "path": "setup.py"}]}
| 3,436 | 241 |
gh_patches_debug_25073
|
rasdani/github-patches
|
git_diff
|
modoboa__modoboa-722
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In auto-reply message form, some issue around AM/PM datetime picker
In auto-reply message form, some issue around AM/PM datetime picker

I can't select AM or PM.
Suggest : enable 24h format
</issue>
<code>
[start of modoboa/extensions/postfix_autoreply/forms.py]
1 # coding: utf-8
2 from django import forms
3 from django.utils.translation import ugettext as _, ugettext_lazy
4 from django.utils import timezone
5 from .models import ARmessage
6
7
8 class ARmessageForm(forms.ModelForm):
9 fromdate = forms.DateTimeField(
10 label=ugettext_lazy('From'),
11 required=False,
12 help_text=ugettext_lazy("Activate your auto reply from this date"),
13 widget=forms.TextInput(
14 attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
15 )
16 )
17 untildate = forms.DateTimeField(
18 label=ugettext_lazy('Until'),
19 required=False,
20 help_text=ugettext_lazy("Activate your auto reply until this date"),
21 widget=forms.TextInput(
22 attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
23 )
24 )
25 subject = forms.CharField(
26 widget=forms.TextInput(
27 attrs={'class': 'form-control'}
28 )
29 )
30 content = forms.CharField(
31 widget=forms.Textarea(
32 attrs={'class': 'form-control'}
33 )
34 )
35
36 class Meta:
37 model = ARmessage
38 fields = ('subject', 'content', 'enabled')
39
40 def __init__(self, *args, **kwargs):
41 super(ARmessageForm, self).__init__(*args, **kwargs)
42 self.fields.keyOrder = [
43 'subject', 'content', 'fromdate', 'untildate', 'enabled'
44 ]
45 if 'instance' in kwargs and kwargs['instance'] is not None:
46 self.fields['fromdate'].initial = \
47 kwargs['instance'].fromdate.replace(second=0, microsecond=0)
48 self.fields['untildate'].initial = kwargs['instance'].untildate
49
50 def clean(self):
51 """Custom fields validaton.
52
53 We want to be sure that fromdate < untildate and that they are
54 both in the future ONLY IF the autoreply is beeing activated.
55
56 """
57 super(ARmessageForm, self).clean()
58 if self._errors:
59 raise forms.ValidationError(self._errors)
60 if not self.cleaned_data["enabled"]:
61 return self.cleaned_data
62 if self.cleaned_data["fromdate"] is not None:
63 if self.cleaned_data["fromdate"] < timezone.now():
64 self._errors["fromdate"] = self.error_class(
65 [_("This date is over")])
66 del self.cleaned_data['fromdate']
67 else:
68 self.cleaned_data['fromdate'] = timezone.now()
69 if self.cleaned_data["untildate"] is not None:
70 if self.cleaned_data["untildate"] < timezone.now():
71 self._errors["untildate"] = self.error_class(
72 [_("This date is over")])
73 del self.cleaned_data['untildate']
74 elif 'fromdate' in self.cleaned_data and \
75 self.cleaned_data['untildate'] < self.cleaned_data['fromdate']:
76 self._errors["untildate"] = \
77 self.error_class([_("Must be greater than start date")])
78 del self.cleaned_data['untildate']
79 return self.cleaned_data
80
[end of modoboa/extensions/postfix_autoreply/forms.py]
[start of modoboa/extensions/postfix_autoreply/general_callbacks.py]
1 from django.utils import timezone
2 from django.utils.translation import ugettext_lazy
3 from modoboa.lib import events, parameters
4 from modoboa.extensions.postfix_autoreply.models import Transport, Alias
5 from .models import ARmessage
6
7
8 @events.observe("ExtraUprefsJS")
9 def extra_js(user):
10 return ["""function autoreply_cb() {
11 $('.datefield').datetimepicker({
12 format: 'YYYY-MM-DD hh:mm:ss',
13 language: '%(lang)s'
14 });
15 }
16 """ % {'lang': parameters.get_user(user, "LANG", app="core")}
17 ]
18
19
20 @events.observe("UserMenuDisplay")
21 def menu(target, user):
22 if target != "uprefs_menu":
23 return []
24 if not user.mailbox_set.count():
25 return []
26 return [
27 {"name": "autoreply",
28 "class": "ajaxnav",
29 "url": "autoreply/",
30 "label": ugettext_lazy("Auto-reply message")}
31 ]
32
33
34 @events.observe("DomainCreated")
35 def onDomainCreated(user, domain):
36 transport = Transport()
37 transport.domain = "autoreply.%s" % domain.name
38 transport.method = "autoreply:"
39 transport.save()
40
41
42 @events.observe("DomainModified")
43 def onDomainModified(domain):
44 if domain.oldname == domain.name:
45 return
46 Transport.objects.filter(domain="autoreply.%s" % domain.oldname) \
47 .update(domain="autoreply.%s" % domain.name)
48 for al in Alias.objects.filter(full_address__contains="@%s" % domain.oldname):
49 new_address = al.full_address.replace("@%s" % domain.oldname, "@%s" % domain.name)
50 al.full_address = new_address
51 al.autoreply_address = "%s@autoreply.%s" % (new_address, domain.name)
52 al.save()
53
54
55 @events.observe("DomainDeleted")
56 def onDomainDeleted(domain):
57 Transport.objects.filter(domain="autoreply.%s" % domain.name).delete()
58
59
60 @events.observe("MailboxCreated")
61 def onMailboxCreated(user, mailbox):
62 alias = Alias()
63 alias.full_address = mailbox.full_address
64 alias.autoreply_address = \
65 "%s@autoreply.%s" % (mailbox.full_address, mailbox.domain.name)
66 alias.save()
67
68
69 @events.observe("MailboxDeleted")
70 def onMailboxDeleted(mailboxes):
71 from modoboa.extensions.admin.models import Mailbox
72
73 if isinstance(mailboxes, Mailbox):
74 mailboxes = [mailboxes]
75 for mailbox in mailboxes:
76 try:
77 alias = Alias.objects.get(full_address=mailbox.full_address)
78 except Alias.DoesNotExist:
79 pass
80 else:
81 alias.delete()
82
83
84 @events.observe("MailboxModified")
85 def onMailboxModified(mailbox):
86 if not hasattr(mailbox, 'old_full_address'):
87 return
88 if mailbox.full_address == mailbox.old_full_address:
89 return
90 alias = Alias.objects.get(full_address=mailbox.old_full_address)
91 alias.full_address = mailbox.full_address
92 alias.autoreply_address = \
93 "%s@autoreply.%s" % (mailbox.full_address, mailbox.domain.name)
94 alias.save()
95
96
97 @events.observe("ExtraFormFields")
98 def extra_mailform_fields(form_name, mailbox=None):
99 """Define extra fields to include in mail forms.
100
101 For now, only the auto-reply state can be modified.
102
103 :param str form_name: form name (must be 'mailform')
104 :param Mailbox mailbox: mailbox
105 """
106 from modoboa.lib.formutils import YesNoField
107
108 if form_name != "mailform":
109 return []
110 status = False
111 if mailbox is not None and mailbox.armessage_set.count():
112 status = mailbox.armessage_set.all()[0].enabled
113 return [
114 ('autoreply', YesNoField(
115 label=ugettext_lazy("Enable auto-reply"),
116 initial="yes" if status else "no",
117 help_text=ugettext_lazy("Enable or disable Postfix auto-reply")
118 ))
119 ]
120
121
122 @events.observe("SaveExtraFormFields")
123 def save_extra_mailform_fields(form_name, mailbox, values):
124 """Set the auto-reply status for a mailbox.
125
126 If a corresponding auto-reply message exists, we update its
127 status. Otherwise, we create a message using default values.
128
129 :param str form_name: form name (must be 'mailform')
130 :param Mailbox mailbox: mailbox
131 :param dict values: form values
132 """
133 if form_name != 'mailform':
134 return
135 if mailbox.armessage_set.count():
136 arm = mailbox.armessage_set.all()[0]
137 else:
138 arm = ARmessage(mbox=mailbox)
139 arm.subject = parameters.get_admin("DEFAULT_SUBJECT")
140 arm.content = parameters.get_admin("DEFAULT_CONTENT") \
141 % {'name': mailbox.user.fullname}
142 arm.fromdate = timezone.now()
143 arm.enabled = True if values['autoreply'] == 'yes' else False
144 arm.save()
145
[end of modoboa/extensions/postfix_autoreply/general_callbacks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/modoboa/extensions/postfix_autoreply/forms.py b/modoboa/extensions/postfix_autoreply/forms.py
--- a/modoboa/extensions/postfix_autoreply/forms.py
+++ b/modoboa/extensions/postfix_autoreply/forms.py
@@ -11,7 +11,7 @@
required=False,
help_text=ugettext_lazy("Activate your auto reply from this date"),
widget=forms.TextInput(
- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
+ attrs={'class': 'datefield form-control'}
)
)
untildate = forms.DateTimeField(
@@ -19,7 +19,7 @@
required=False,
help_text=ugettext_lazy("Activate your auto reply until this date"),
widget=forms.TextInput(
- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
+ attrs={'class': 'datefield form-control'}
)
)
subject = forms.CharField(
diff --git a/modoboa/extensions/postfix_autoreply/general_callbacks.py b/modoboa/extensions/postfix_autoreply/general_callbacks.py
--- a/modoboa/extensions/postfix_autoreply/general_callbacks.py
+++ b/modoboa/extensions/postfix_autoreply/general_callbacks.py
@@ -9,7 +9,7 @@
def extra_js(user):
return ["""function autoreply_cb() {
$('.datefield').datetimepicker({
- format: 'YYYY-MM-DD hh:mm:ss',
+ format: 'YYYY-MM-DD HH:mm:ss',
language: '%(lang)s'
});
}
|
{"golden_diff": "diff --git a/modoboa/extensions/postfix_autoreply/forms.py b/modoboa/extensions/postfix_autoreply/forms.py\n--- a/modoboa/extensions/postfix_autoreply/forms.py\n+++ b/modoboa/extensions/postfix_autoreply/forms.py\n@@ -11,7 +11,7 @@\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply from this date\"),\n widget=forms.TextInput(\n- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n+ attrs={'class': 'datefield form-control'}\n )\n )\n untildate = forms.DateTimeField(\n@@ -19,7 +19,7 @@\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply until this date\"),\n widget=forms.TextInput(\n- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n+ attrs={'class': 'datefield form-control'}\n )\n )\n subject = forms.CharField(\ndiff --git a/modoboa/extensions/postfix_autoreply/general_callbacks.py b/modoboa/extensions/postfix_autoreply/general_callbacks.py\n--- a/modoboa/extensions/postfix_autoreply/general_callbacks.py\n+++ b/modoboa/extensions/postfix_autoreply/general_callbacks.py\n@@ -9,7 +9,7 @@\n def extra_js(user):\n return [\"\"\"function autoreply_cb() {\n $('.datefield').datetimepicker({\n- format: 'YYYY-MM-DD hh:mm:ss',\n+ format: 'YYYY-MM-DD HH:mm:ss',\n language: '%(lang)s'\n });\n }\n", "issue": "In auto-reply message form, some issue around AM/PM datetime picker\nIn auto-reply message form, some issue around AM/PM datetime picker\n\n\nI can't select AM or PM.\n\nSuggest : enable 24h format\n\n", "before_files": [{"content": "# coding: utf-8\nfrom django import forms\nfrom django.utils.translation import ugettext as _, ugettext_lazy\nfrom django.utils import timezone\nfrom .models import ARmessage\n\n\nclass ARmessageForm(forms.ModelForm):\n fromdate = forms.DateTimeField(\n label=ugettext_lazy('From'),\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply from this date\"),\n widget=forms.TextInput(\n attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n )\n )\n untildate = forms.DateTimeField(\n label=ugettext_lazy('Until'),\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply until this date\"),\n widget=forms.TextInput(\n attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n )\n )\n subject = forms.CharField(\n widget=forms.TextInput(\n attrs={'class': 'form-control'}\n )\n )\n content = forms.CharField(\n widget=forms.Textarea(\n attrs={'class': 'form-control'}\n )\n )\n\n class Meta:\n model = ARmessage\n fields = ('subject', 'content', 'enabled')\n\n def __init__(self, *args, **kwargs):\n super(ARmessageForm, self).__init__(*args, **kwargs)\n self.fields.keyOrder = [\n 'subject', 'content', 'fromdate', 'untildate', 'enabled'\n ]\n if 'instance' in kwargs and kwargs['instance'] is not None:\n self.fields['fromdate'].initial = \\\n kwargs['instance'].fromdate.replace(second=0, microsecond=0)\n self.fields['untildate'].initial = kwargs['instance'].untildate\n\n def clean(self):\n \"\"\"Custom fields validaton.\n\n We want to be sure that fromdate < untildate and that they are\n both in the future ONLY IF the autoreply is beeing activated.\n\n \"\"\"\n super(ARmessageForm, self).clean()\n if self._errors:\n raise forms.ValidationError(self._errors)\n if not self.cleaned_data[\"enabled\"]:\n return self.cleaned_data\n if self.cleaned_data[\"fromdate\"] is not None:\n if self.cleaned_data[\"fromdate\"] < timezone.now():\n self._errors[\"fromdate\"] = self.error_class(\n [_(\"This date is over\")])\n del self.cleaned_data['fromdate']\n else:\n 
self.cleaned_data['fromdate'] = timezone.now()\n if self.cleaned_data[\"untildate\"] is not None:\n if self.cleaned_data[\"untildate\"] < timezone.now():\n self._errors[\"untildate\"] = self.error_class(\n [_(\"This date is over\")])\n del self.cleaned_data['untildate']\n elif 'fromdate' in self.cleaned_data and \\\n self.cleaned_data['untildate'] < self.cleaned_data['fromdate']:\n self._errors[\"untildate\"] = \\\n self.error_class([_(\"Must be greater than start date\")])\n del self.cleaned_data['untildate']\n return self.cleaned_data\n", "path": "modoboa/extensions/postfix_autoreply/forms.py"}, {"content": "from django.utils import timezone\nfrom django.utils.translation import ugettext_lazy\nfrom modoboa.lib import events, parameters\nfrom modoboa.extensions.postfix_autoreply.models import Transport, Alias\nfrom .models import ARmessage\n\n\[email protected](\"ExtraUprefsJS\")\ndef extra_js(user):\n return [\"\"\"function autoreply_cb() {\n $('.datefield').datetimepicker({\n format: 'YYYY-MM-DD hh:mm:ss',\n language: '%(lang)s'\n });\n}\n\"\"\" % {'lang': parameters.get_user(user, \"LANG\", app=\"core\")}\n ]\n\n\[email protected](\"UserMenuDisplay\")\ndef menu(target, user):\n if target != \"uprefs_menu\":\n return []\n if not user.mailbox_set.count():\n return []\n return [\n {\"name\": \"autoreply\",\n \"class\": \"ajaxnav\",\n \"url\": \"autoreply/\",\n \"label\": ugettext_lazy(\"Auto-reply message\")}\n ]\n\n\[email protected](\"DomainCreated\")\ndef onDomainCreated(user, domain):\n transport = Transport()\n transport.domain = \"autoreply.%s\" % domain.name\n transport.method = \"autoreply:\"\n transport.save()\n\n\[email protected](\"DomainModified\")\ndef onDomainModified(domain):\n if domain.oldname == domain.name:\n return\n Transport.objects.filter(domain=\"autoreply.%s\" % domain.oldname) \\\n .update(domain=\"autoreply.%s\" % domain.name)\n for al in Alias.objects.filter(full_address__contains=\"@%s\" % domain.oldname):\n new_address = al.full_address.replace(\"@%s\" % domain.oldname, \"@%s\" % domain.name)\n al.full_address = new_address\n al.autoreply_address = \"%s@autoreply.%s\" % (new_address, domain.name)\n al.save()\n\n\[email protected](\"DomainDeleted\")\ndef onDomainDeleted(domain):\n Transport.objects.filter(domain=\"autoreply.%s\" % domain.name).delete()\n\n\[email protected](\"MailboxCreated\")\ndef onMailboxCreated(user, mailbox):\n alias = Alias()\n alias.full_address = mailbox.full_address\n alias.autoreply_address = \\\n \"%s@autoreply.%s\" % (mailbox.full_address, mailbox.domain.name)\n alias.save()\n\n\[email protected](\"MailboxDeleted\")\ndef onMailboxDeleted(mailboxes):\n from modoboa.extensions.admin.models import Mailbox\n\n if isinstance(mailboxes, Mailbox):\n mailboxes = [mailboxes]\n for mailbox in mailboxes:\n try:\n alias = Alias.objects.get(full_address=mailbox.full_address)\n except Alias.DoesNotExist:\n pass\n else:\n alias.delete()\n\n\[email protected](\"MailboxModified\")\ndef onMailboxModified(mailbox):\n if not hasattr(mailbox, 'old_full_address'):\n return\n if mailbox.full_address == mailbox.old_full_address:\n return\n alias = Alias.objects.get(full_address=mailbox.old_full_address)\n alias.full_address = mailbox.full_address\n alias.autoreply_address = \\\n \"%s@autoreply.%s\" % (mailbox.full_address, mailbox.domain.name)\n alias.save()\n\n\[email protected](\"ExtraFormFields\")\ndef extra_mailform_fields(form_name, mailbox=None):\n \"\"\"Define extra fields to include in mail forms.\n\n For now, only the auto-reply state can be 
modified.\n\n :param str form_name: form name (must be 'mailform')\n :param Mailbox mailbox: mailbox\n \"\"\"\n from modoboa.lib.formutils import YesNoField\n\n if form_name != \"mailform\":\n return []\n status = False\n if mailbox is not None and mailbox.armessage_set.count():\n status = mailbox.armessage_set.all()[0].enabled\n return [\n ('autoreply', YesNoField(\n label=ugettext_lazy(\"Enable auto-reply\"),\n initial=\"yes\" if status else \"no\",\n help_text=ugettext_lazy(\"Enable or disable Postfix auto-reply\")\n ))\n ]\n\n\[email protected](\"SaveExtraFormFields\")\ndef save_extra_mailform_fields(form_name, mailbox, values):\n \"\"\"Set the auto-reply status for a mailbox.\n\n If a corresponding auto-reply message exists, we update its\n status. Otherwise, we create a message using default values.\n\n :param str form_name: form name (must be 'mailform')\n :param Mailbox mailbox: mailbox\n :param dict values: form values\n \"\"\"\n if form_name != 'mailform':\n return\n if mailbox.armessage_set.count():\n arm = mailbox.armessage_set.all()[0]\n else:\n arm = ARmessage(mbox=mailbox)\n arm.subject = parameters.get_admin(\"DEFAULT_SUBJECT\")\n arm.content = parameters.get_admin(\"DEFAULT_CONTENT\") \\\n % {'name': mailbox.user.fullname}\n arm.fromdate = timezone.now()\n arm.enabled = True if values['autoreply'] == 'yes' else False\n arm.save()\n", "path": "modoboa/extensions/postfix_autoreply/general_callbacks.py"}]}
| 2,914 | 339 |
gh_patches_debug_11619
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-440
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Function Role property doesn't work with string value
The following results in a TypeError for the Role property. I don't get a TypeError if I use GetAtt for Role's value, but I need to use a string. The problem appears to be that when a property's expected type is given as a list of alternative types, the validation treats that list as "the value must be a list" rather than "the value must match one of these types".
```
code = Code(
S3Bucket=self.s3Bucket,
S3Key=self.s3Key
)
func = Function(
"title",
Code=code,
Handler="index.handler",
Role="role-arn-here",
Runtime="nodejs",
)
```
```
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 66, in __init__
self.__setattr__(k, v)
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 110, in __setattr__
self._raise_type(name, value, expected_type)
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 145, in _raise_type
expected_type))
TypeError: <class 'troposphere.awslambda.Function'>: EventssomeFunction.Role is <class 'str'>, expected [<class 'str'>, <class 'troposphere.GetAtt'>]
```
Suspected issue spot in `__init__.py:`
```
# If we're expecting a list, then make sure it is a list
if not isinstance(value, list):
self._raise_type(name, value, expected_type)
```
And the definition of Role in `awslambda.py:`
```
class Function(AWSObject):
resource_type = "AWS::Lambda::Function"
props = {
'Code': (Code, True),
'Description': (str, False),
'Handler': (str, True),
'MemorySize': (positive_integer, False),
'Role': ([str, GetAtt], True),
'Runtime': (str, True),
'Timeout': (positive_integer, False),
}
```
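
For illustration, here is a minimal self-contained sketch (hypothetical helper, not troposphere's actual code) of how a union-style expected type such as `[str, GetAtt]` can trip a check that reads any list as "the value must itself be a list":

```python
# Hypothetical reduction of the suspected check, not the real troposphere validation.
def check_property(value, expected_type):
    if isinstance(expected_type, list):
        # Naive reading: "expected_type is a list" means "the value must be a list".
        if not isinstance(value, list):
            raise TypeError("%r is %s, expected %s" % (value, type(value), expected_type))
        return
    if not isinstance(value, expected_type):
        raise TypeError("%r is %s, expected %s" % (value, type(value), expected_type))

try:
    # A plain role ARN string is rejected even though str is one of the alternatives.
    check_property("role-arn-here", [str, object])
except TypeError as exc:
    print(exc)
```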
</issue>
<code>
[start of troposphere/awslambda.py]
1 from . import AWSObject, AWSProperty, GetAtt
2 from .validators import positive_integer
3
4
5 class Code(AWSProperty):
6 props = {
7 'S3Bucket': (basestring, False),
8 'S3Key': (basestring, False),
9 'S3ObjectVersion': (basestring, False),
10 'ZipFile': (basestring, False)
11 }
12
13 def validate(self):
14 zip_file = self.properties.get('ZipFile')
15 s3_bucket = self.properties.get('S3Bucket')
16 s3_key = self.properties.get('S3Key')
17 s3_object_version = self.properties.get('SS3ObjectVersion')
18
19 if zip_file and s3_bucket:
20 raise ValueError("You can't specify both 'S3Bucket' and 'ZipFile'")
21 if zip_file and s3_key:
22 raise ValueError("You can't specify both 'S3Key' and 'ZipFile'")
23 if zip_file and s3_object_version:
24 raise ValueError(
25 "You can't specify both 'S3ObjectVersion' and 'ZipFile'"
26 )
27 if not zip_file and not (s3_bucket and s3_key):
28 raise ValueError(
29 "You must specify a bucket location (both the 'S3Bucket' and "
30 "'S3Key' properties) or the 'ZipFile' property"
31 )
32
33
34 class EventSourceMapping(AWSObject):
35 resource_type = "AWS::Lambda::EventSourceMapping"
36
37 props = {
38 'BatchSize': (positive_integer, False),
39 'Enabled': (bool, False),
40 'EventSourceArn': (basestring, True),
41 'FunctionName': (basestring, True),
42 'StartingPosition': (basestring, True),
43 }
44
45
46 class Function(AWSObject):
47 resource_type = "AWS::Lambda::Function"
48
49 props = {
50 'Code': (Code, True),
51 'Description': (basestring, False),
52 'Handler': (basestring, True),
53 'MemorySize': (positive_integer, False),
54 'Role': ([basestring, GetAtt], True),
55 'Runtime': (basestring, True),
56 'Timeout': (positive_integer, False),
57 }
58
59
60 class Permission(AWSObject):
61 resource_type = "AWS::Lambda::Permission"
62
63 props = {
64 'Action': (basestring, True),
65 'FunctionName': (basestring, True),
66 'Principal': (basestring, True),
67 'SourceAccount': (basestring, False),
68 'SourceArn': (basestring, False),
69 }
70
[end of troposphere/awslambda.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/troposphere/awslambda.py b/troposphere/awslambda.py
--- a/troposphere/awslambda.py
+++ b/troposphere/awslambda.py
@@ -1,4 +1,4 @@
-from . import AWSObject, AWSProperty, GetAtt
+from . import AWSObject, AWSProperty
from .validators import positive_integer
@@ -51,7 +51,7 @@
'Description': (basestring, False),
'Handler': (basestring, True),
'MemorySize': (positive_integer, False),
- 'Role': ([basestring, GetAtt], True),
+ 'Role': (basestring, True),
'Runtime': (basestring, True),
'Timeout': (positive_integer, False),
}
|
{"golden_diff": "diff --git a/troposphere/awslambda.py b/troposphere/awslambda.py\n--- a/troposphere/awslambda.py\n+++ b/troposphere/awslambda.py\n@@ -1,4 +1,4 @@\n-from . import AWSObject, AWSProperty, GetAtt\n+from . import AWSObject, AWSProperty\n from .validators import positive_integer\n \n \n@@ -51,7 +51,7 @@\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n- 'Role': ([basestring, GetAtt], True),\n+ 'Role': (basestring, True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n", "issue": "Function Role property doesn't work with string value\nThe following results in a TypeError for the Role property type. I don't get a TypeError if I use GetAtt for Role's type, but I need to use a string. Looks like an issue in which the expected type is a list of possible types, but the validation on types then thinks the value should be a list instead of being one of the types in the list.\n\n```\ncode = Code(\n S3Bucket=self.s3Bucket,\n S3Key=self.s3Key\n )\n\nfunc = Function(\n \"title\",\n Code=code,\n Handler=\"index.handler\",\n Role=\"role-arn-here\",\n Runtime=\"nodejs\",\n )\n```\n\n```\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 66, in __init__\n self.__setattr__(k, v)\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 110, in __setattr__\n self._raise_type(name, value, expected_type)\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 145, in _raise_type\n expected_type))\nTypeError: <class 'troposphere.awslambda.Function'>: EventssomeFunction.Role is <class 'str'>, expected [<class 'str'>, <class 'troposphere.GetAtt'>]\n```\n\nSuspected issue spot in `__init__.py:`\n\n```\n# If we're expecting a list, then make sure it is a list\nif not isinstance(value, list):\n self._raise_type(name, value, expected_type)\n```\n\nAnd the definition of Role in `awslambda.py:`\n\n```\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (str, False),\n 'Handler': (str, True),\n 'MemorySize': (positive_integer, False),\n 'Role': ([str, GetAtt], True),\n 'Runtime': (str, True),\n 'Timeout': (positive_integer, False),\n }\n```\n\n", "before_files": [{"content": "from . 
import AWSObject, AWSProperty, GetAtt\nfrom .validators import positive_integer\n\n\nclass Code(AWSProperty):\n props = {\n 'S3Bucket': (basestring, False),\n 'S3Key': (basestring, False),\n 'S3ObjectVersion': (basestring, False),\n 'ZipFile': (basestring, False)\n }\n\n def validate(self):\n zip_file = self.properties.get('ZipFile')\n s3_bucket = self.properties.get('S3Bucket')\n s3_key = self.properties.get('S3Key')\n s3_object_version = self.properties.get('SS3ObjectVersion')\n\n if zip_file and s3_bucket:\n raise ValueError(\"You can't specify both 'S3Bucket' and 'ZipFile'\")\n if zip_file and s3_key:\n raise ValueError(\"You can't specify both 'S3Key' and 'ZipFile'\")\n if zip_file and s3_object_version:\n raise ValueError(\n \"You can't specify both 'S3ObjectVersion' and 'ZipFile'\"\n )\n if not zip_file and not (s3_bucket and s3_key):\n raise ValueError(\n \"You must specify a bucket location (both the 'S3Bucket' and \"\n \"'S3Key' properties) or the 'ZipFile' property\"\n )\n\n\nclass EventSourceMapping(AWSObject):\n resource_type = \"AWS::Lambda::EventSourceMapping\"\n\n props = {\n 'BatchSize': (positive_integer, False),\n 'Enabled': (bool, False),\n 'EventSourceArn': (basestring, True),\n 'FunctionName': (basestring, True),\n 'StartingPosition': (basestring, True),\n }\n\n\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n 'Role': ([basestring, GetAtt], True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n\n\nclass Permission(AWSObject):\n resource_type = \"AWS::Lambda::Permission\"\n\n props = {\n 'Action': (basestring, True),\n 'FunctionName': (basestring, True),\n 'Principal': (basestring, True),\n 'SourceAccount': (basestring, False),\n 'SourceArn': (basestring, False),\n }\n", "path": "troposphere/awslambda.py"}]}
| 1,738 | 174 |
gh_patches_debug_20882
|
rasdani/github-patches
|
git_diff
|
OpenEnergyPlatform__oeplatform-699
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tutorial form: Markdown for code does not work
See the example tutorial:
https://openenergy-platform.org/tutorials/12/
This would be important for our beginners' tutorial to work:
https://openenergy-platform.org/tutorials/11/
> See also #405
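
A quick standalone check (a sketch assuming the `markdown2` package that the tutorial form already uses; the sample text is made up) shows that fenced code blocks only turn into `<pre><code>` HTML when the matching extra is enabled:

```python
# Standalone sketch; assumes the markdown2 package used by the tutorial form.
from markdown2 import Markdown

fence = "`" * 3  # a literal triple backtick, built up so this snippet stays readable
text = "Example:\n\n" + fence + "\nprint('hello world')\n" + fence + "\n"

plain = Markdown(safe_mode=True).convert(text)
fenced = Markdown(extras=["fenced-code-blocks"], safe_mode=True).convert(text)

print(plain)   # no fenced <pre><code> block is produced here
print(fenced)  # with the extra enabled the fence becomes <pre><code>...</code></pre>
```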
</issue>
<code>
[start of tutorials/views.py]
1 from django.shortcuts import render, redirect
2 from django.views import View
3 from django.views.generic.edit import CreateView, UpdateView, DeleteView
4 from django.urls import exceptions, reverse_lazy
5 from django.contrib.auth.mixins import LoginRequiredMixin
6
7 import os
8 import json
9
10 from copy import deepcopy
11
12 from django.conf import settings
13
14 from markdown2 import Markdown
15
16 from .forms import TutorialForm
17 from .models import Tutorial
18
19 import re
20 # Create your views here.
21
22 youtubeUrlRegex = re.compile('^.*youtube\.com\/watch\?v=(?P<id>[A-z0-9]+)$')
23
24 def _resolveStaticTutorial(tutorial):
25 try:
26 with open(os.path.join(settings.BASE_DIR, "examples", "build", tutorial["fileName"]), 'r') as buildFile:
27 buildFileContent = buildFile.read()
28
29 return {
30 "html": buildFileContent
31 }
32
33 except:
34 return {"html": "Tutorial is missing"}
35
36
37 def _resolveStaticTutorials():
38 resolvedTutorials = []
39
40 # Load list of static tutorials
41
42 try:
43 with open(os.path.join(settings.BASE_DIR, "examples", "build", 'meta.json'), 'r') as metaFile:
44 metaContent = json.load(metaFile)
45
46 for tutorial in metaContent:
47 rTut = _resolveStaticTutorial(tutorial)
48 resolvedTutorials.append({
49 'id': tutorial['id'],
50 'fileName': tutorial['fileName'],
51 'title': tutorial['title'] or tutorial['fileName'],
52 'html': rTut['html'],
53 })
54
55 return sorted(resolvedTutorials, key=lambda x: x["title"])
56 except Exception as e:
57 print('Static tutorials could not be loaded, error=%s' % e)
58 # If we do not have a generated meta.json or we cannot read them, we just do not return any static
59 # tutorials. This is completly fine and dynamic tutorials can be used like normal.
60 return []
61
62
63 def _resolveDynamicTutorial(evaluatedQs):
64 """
65
66
67 :param evaluatedQs: Evaluated queryset object
68 :return:
69 """
70
71 # Initialize dict that stores a tutorial
72 currentTutorial = {'id': '', 'title': '', 'html': '', 'markdown': '', 'category': '', 'media_src': '', 'level': ''}
73
74 # populate dict
75 currentTutorial.update(id=str(evaluatedQs.id),
76 title=evaluatedQs.title,
77 html=evaluatedQs.html,
78 markdown=evaluatedQs.markdown,
79 category= evaluatedQs.category,
80 media_src= evaluatedQs.media_src,
81 level=evaluatedQs.level)
82
83 return currentTutorial
84
85
86 def _resolveDynamicTutorials(tutorials_qs):
87 """
88 Evaluates a QuerySet and passes each evaluated object to the next function which returns a python
89 dictionary that contains all parameters from the object as dict. The dict is added to a list to
90 later merge the static and dynamic tutorials together.
91
92 :param tutorials_qs:
93 :return:
94 """
95 resolvedTutorials = []
96
97 for tutorial in tutorials_qs:
98 paramsToAdd = _resolveDynamicTutorial(tutorial)
99
100 resolvedTutorials.append(paramsToAdd)
101
102 return resolvedTutorials
103
104
105 def _gatherTutorials(id=None):
106 """
107 Collects all existing tutorials (static/dynamic) and returns them as a list. If an id is
108 specified as parameter a specific tutorial is returned filtered by id.
109
110 :param id:
111 :return:
112 """
113
114 # Retrieve allTutorials objects from db and cache
115 dynamicTutorialsQs = Tutorial.objects.all()
116
117 tutorials = _resolveStaticTutorials()
118 tutorials.extend(_resolveDynamicTutorials(dynamicTutorialsQs))
119
120 if id:
121 filteredElement = list(filter(lambda tutorial: tutorial["id"] == id, tutorials))[0]
122 return filteredElement
123
124 return tutorials
125
126 def _processFormInput(form):
127 tutorial = form.save(commit=False)
128 # Add more information to the dataset like date, time, contributor ...
129
130 if tutorial.media_src:
131 matchResult = youtubeUrlRegex.match(tutorial.media_src)
132 videoId = matchResult.group(1) if matchResult else None
133 if videoId:
134 tutorial.media_src = "https://www.youtube.com/embed/" + videoId
135
136 return tutorial
137
138 def formattedMarkdown(markdown):
139 """
140 A parameter is used to enter a text formatted as markdown that is formatted
141 to html and returned. This functionality is implemented using Markdown2.
142
143 :param markdown:
144 :return:
145 """
146
147 # escapes html but also escapes html code blocks lke "exampel code:
148 # (1 tab) code"
149 # checkbox also not rendered as expected "- [ ]"
150 markdowner = Markdown(safe_mode=True)
151 markdowner.html_removed_text = ""
152
153 return markdowner.convert(markdown)
154
155
156 class ListTutorials(View):
157 def get(self, request):
158 """
159 Load and list the available tutorials.
160
161 :param request: A HTTP-request object sent by the Django framework.
162 :return: Tutorials renderer
163 """
164
165 # Gathering all tutorials
166
167 tutorials = _gatherTutorials()
168
169 return render(
170 request, 'list.html', {"tutorials": tutorials}
171 )
172
173
174 class TutorialDetail(View):
175 def get(self, request, tutorial_id):
176 """
177 Detail view for specific tutorial.
178
179 :param request: A HTTP-request object sent by the Django framework.
180 :return: Tutorials renderer
181 """
182
183 # Gathering all tutorials
184
185 tutorial = _gatherTutorials(tutorial_id)
186
187 return render(
188 request, 'detail.html', {"tutorial": tutorial}
189 )
190
191
192 class CreateNewTutorial(LoginRequiredMixin, CreateView):
193 template_name = 'add.html'
194 redirect_url = 'detail_tutorial'
195 form_class = TutorialForm
196 login_url = '/user/login/'
197 redirect_field_name = 'redirect_to'
198
199 def form_valid(self, form):
200 """
201 validates a form and stores the values in the database and inserts a
202 value for the tutorials field html.
203
204 :param form:
205 :return:
206 """
207
208 tutorial = _processFormInput(form)
209 tutorial.save()
210
211 # Convert markdown to HTML and save to db
212 _html = formattedMarkdown(tutorial.markdown)
213 addHtml = Tutorial.objects.get(pk=tutorial.id)
214 addHtml.html = _html
215 addHtml.save()
216
217 return redirect(self.redirect_url, tutorial_id=tutorial.id)
218
219 def addTutorialFromMarkdownFile(self):
220 pass
221
222
223 class EditTutorials(LoginRequiredMixin, UpdateView):
224 template_name = 'add.html'
225 redirect_url = 'detail_tutorial'
226 model = Tutorial
227 form_class = TutorialForm
228 pk_url_kwarg = 'tutorial_id'
229 login_url = '/user/login/'
230 redirect_field_name = 'redirect_to'
231
232 def form_valid(self, form):
233 """
234 validates a form and stores the values in the database and inserts a
235 value for the tutorials field html.
236
237 :param form:
238 :return:
239 """
240 tutorial = _processFormInput(form)
241 tutorial.save()
242
243 _html = formattedMarkdown(tutorial.markdown)
244 addHtml = Tutorial.objects.get(pk=tutorial.id)
245 addHtml.html = _html
246 addHtml.save()
247
248 return redirect(self.redirect_url, tutorial_id=tutorial.id)
249
250
251 class DeleteTutorial(LoginRequiredMixin, DeleteView):
252 template_name = 'tutorial_confirm_delete.html'
253 model = Tutorial
254 pk_url_kwarg = 'tutorial_id'
255 success_url = reverse_lazy('list_tutorials')
256 login_url = '/user/login/'
257 redirect_field_name = 'redirect_to'
258
259
260
261
262
263
264
265
266
[end of tutorials/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tutorials/views.py b/tutorials/views.py
--- a/tutorials/views.py
+++ b/tutorials/views.py
@@ -147,7 +147,8 @@
# escapes html but also escapes html code blocks lke "exampel code:
# (1 tab) code"
# checkbox also not rendered as expected "- [ ]"
- markdowner = Markdown(safe_mode=True)
+ # TODO: Add syntax highliting, add css files -> https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks
+ markdowner = Markdown( extras=["break-on-newline", "fenced-code-blocks"], safe_mode=True)
markdowner.html_removed_text = ""
return markdowner.convert(markdown)
@@ -210,6 +211,7 @@
# Convert markdown to HTML and save to db
_html = formattedMarkdown(tutorial.markdown)
+ print(_html)
addHtml = Tutorial.objects.get(pk=tutorial.id)
addHtml.html = _html
addHtml.save()
|
{"golden_diff": "diff --git a/tutorials/views.py b/tutorials/views.py\n--- a/tutorials/views.py\n+++ b/tutorials/views.py\n@@ -147,7 +147,8 @@\n # escapes html but also escapes html code blocks lke \"exampel code:\n # (1 tab) code\"\n # checkbox also not rendered as expected \"- [ ]\"\n- markdowner = Markdown(safe_mode=True)\n+ # TODO: Add syntax highliting, add css files -> https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks \n+ markdowner = Markdown( extras=[\"break-on-newline\", \"fenced-code-blocks\"], safe_mode=True)\n markdowner.html_removed_text = \"\"\n \n return markdowner.convert(markdown)\n@@ -210,6 +211,7 @@\n \n # Convert markdown to HTML and save to db\n _html = formattedMarkdown(tutorial.markdown)\n+ print(_html)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n", "issue": "Tutorial form: Markdown for code does not work\nSee example tutorial\r\nhttps://openenergy-platform.org/tutorials/12/\r\n\r\nThis would be important for our beginners tutorial to work:\r\nhttps://openenergy-platform.org/tutorials/11/\r\n \r\n\r\n> See also #405 \r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.urls import exceptions, reverse_lazy\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nimport os\nimport json\n\nfrom copy import deepcopy\n\nfrom django.conf import settings\n\nfrom markdown2 import Markdown\n\nfrom .forms import TutorialForm\nfrom .models import Tutorial\n\nimport re\n# Create your views here.\n\nyoutubeUrlRegex = re.compile('^.*youtube\\.com\\/watch\\?v=(?P<id>[A-z0-9]+)$')\n\ndef _resolveStaticTutorial(tutorial):\n try:\n with open(os.path.join(settings.BASE_DIR, \"examples\", \"build\", tutorial[\"fileName\"]), 'r') as buildFile:\n buildFileContent = buildFile.read()\n\n return {\n \"html\": buildFileContent\n }\n\n except:\n return {\"html\": \"Tutorial is missing\"}\n\n\ndef _resolveStaticTutorials():\n resolvedTutorials = []\n\n # Load list of static tutorials\n\n try:\n with open(os.path.join(settings.BASE_DIR, \"examples\", \"build\", 'meta.json'), 'r') as metaFile:\n metaContent = json.load(metaFile)\n\n for tutorial in metaContent:\n rTut = _resolveStaticTutorial(tutorial)\n resolvedTutorials.append({\n 'id': tutorial['id'],\n 'fileName': tutorial['fileName'],\n 'title': tutorial['title'] or tutorial['fileName'],\n 'html': rTut['html'],\n })\n\n return sorted(resolvedTutorials, key=lambda x: x[\"title\"])\n except Exception as e:\n print('Static tutorials could not be loaded, error=%s' % e)\n # If we do not have a generated meta.json or we cannot read them, we just do not return any static\n # tutorials. 
This is completly fine and dynamic tutorials can be used like normal.\n return []\n\n\ndef _resolveDynamicTutorial(evaluatedQs):\n \"\"\"\n\n\n :param evaluatedQs: Evaluated queryset object\n :return:\n \"\"\"\n\n # Initialize dict that stores a tutorial\n currentTutorial = {'id': '', 'title': '', 'html': '', 'markdown': '', 'category': '', 'media_src': '', 'level': ''}\n\n # populate dict\n currentTutorial.update(id=str(evaluatedQs.id),\n title=evaluatedQs.title,\n html=evaluatedQs.html,\n markdown=evaluatedQs.markdown,\n category= evaluatedQs.category,\n media_src= evaluatedQs.media_src,\n level=evaluatedQs.level)\n\n return currentTutorial\n\n\ndef _resolveDynamicTutorials(tutorials_qs):\n \"\"\"\n Evaluates a QuerySet and passes each evaluated object to the next function which returns a python\n dictionary that contains all parameters from the object as dict. The dict is added to a list to\n later merge the static and dynamic tutorials together.\n\n :param tutorials_qs:\n :return:\n \"\"\"\n resolvedTutorials = []\n\n for tutorial in tutorials_qs:\n paramsToAdd = _resolveDynamicTutorial(tutorial)\n\n resolvedTutorials.append(paramsToAdd)\n\n return resolvedTutorials\n\n\ndef _gatherTutorials(id=None):\n \"\"\"\n Collects all existing tutorials (static/dynamic) and returns them as a list. If an id is\n specified as parameter a specific tutorial is returned filtered by id.\n\n :param id:\n :return:\n \"\"\"\n\n # Retrieve allTutorials objects from db and cache\n dynamicTutorialsQs = Tutorial.objects.all()\n\n tutorials = _resolveStaticTutorials()\n tutorials.extend(_resolveDynamicTutorials(dynamicTutorialsQs))\n\n if id:\n filteredElement = list(filter(lambda tutorial: tutorial[\"id\"] == id, tutorials))[0]\n return filteredElement\n\n return tutorials\n\ndef _processFormInput(form):\n tutorial = form.save(commit=False)\n # Add more information to the dataset like date, time, contributor ...\n\n if tutorial.media_src:\n matchResult = youtubeUrlRegex.match(tutorial.media_src)\n videoId = matchResult.group(1) if matchResult else None\n if videoId:\n tutorial.media_src = \"https://www.youtube.com/embed/\" + videoId\n\n return tutorial\n\ndef formattedMarkdown(markdown):\n \"\"\"\n A parameter is used to enter a text formatted as markdown that is formatted\n to html and returned. 
This functionality is implemented using Markdown2.\n\n :param markdown:\n :return:\n \"\"\"\n\n # escapes html but also escapes html code blocks lke \"exampel code:\n # (1 tab) code\"\n # checkbox also not rendered as expected \"- [ ]\"\n markdowner = Markdown(safe_mode=True)\n markdowner.html_removed_text = \"\"\n\n return markdowner.convert(markdown)\n\n\nclass ListTutorials(View):\n def get(self, request):\n \"\"\"\n Load and list the available tutorials.\n\n :param request: A HTTP-request object sent by the Django framework.\n :return: Tutorials renderer\n \"\"\"\n\n # Gathering all tutorials\n\n tutorials = _gatherTutorials()\n\n return render(\n request, 'list.html', {\"tutorials\": tutorials}\n )\n\n\nclass TutorialDetail(View):\n def get(self, request, tutorial_id):\n \"\"\"\n Detail view for specific tutorial.\n\n :param request: A HTTP-request object sent by the Django framework.\n :return: Tutorials renderer\n \"\"\"\n\n # Gathering all tutorials\n\n tutorial = _gatherTutorials(tutorial_id)\n\n return render(\n request, 'detail.html', {\"tutorial\": tutorial}\n )\n\n\nclass CreateNewTutorial(LoginRequiredMixin, CreateView):\n template_name = 'add.html'\n redirect_url = 'detail_tutorial'\n form_class = TutorialForm\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n \"\"\"\n validates a form and stores the values in the database and inserts a\n value for the tutorials field html.\n\n :param form:\n :return:\n \"\"\"\n\n tutorial = _processFormInput(form)\n tutorial.save()\n\n # Convert markdown to HTML and save to db\n _html = formattedMarkdown(tutorial.markdown)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n\n return redirect(self.redirect_url, tutorial_id=tutorial.id)\n\n def addTutorialFromMarkdownFile(self):\n pass\n\n\nclass EditTutorials(LoginRequiredMixin, UpdateView):\n template_name = 'add.html'\n redirect_url = 'detail_tutorial'\n model = Tutorial\n form_class = TutorialForm\n pk_url_kwarg = 'tutorial_id'\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n \"\"\"\n validates a form and stores the values in the database and inserts a\n value for the tutorials field html.\n\n :param form:\n :return:\n \"\"\"\n tutorial = _processFormInput(form)\n tutorial.save()\n\n _html = formattedMarkdown(tutorial.markdown)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n\n return redirect(self.redirect_url, tutorial_id=tutorial.id)\n\n\nclass DeleteTutorial(LoginRequiredMixin, DeleteView):\n template_name = 'tutorial_confirm_delete.html'\n model = Tutorial\n pk_url_kwarg = 'tutorial_id'\n success_url = reverse_lazy('list_tutorials')\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n\n\n\n\n\n\n\n", "path": "tutorials/views.py"}]}
| 2,951 | 236 |
gh_patches_debug_9363
|
rasdani/github-patches
|
git_diff
|
crytic__slither-1530
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"not in UPPER_CASE_WITH_UNDERSCORES" should not apply to public/external constants
### Describe the issue:
`public`/`external` fields of a contract are exported as functions, so they are part of a public API, and Slither should not report the warning `not in UPPER_CASE_WITH_UNDERSCORES` if these fields are in lowercase.
### Code example to reproduce the issue:
```
contract A { uint256 public myVal; }
```
### Version:
0.8.3
### Relevant log output:
```shell
Constant A.myVal (contracts/A.sol#1) is not in UPPER_CASE_WITH_UNDERSCORES
```
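
A sketch of the requested exception, using the same attributes the detector already reads (`is_constant`, `visibility`, `name`); this only illustrates the intended rule and is not the actual detector code:

```python
import re
from collections import namedtuple

Var = namedtuple("Var", "name visibility is_constant")  # stand-in for Slither's variable object

def is_upper_case_with_underscores(name):
    return re.search(r"^[A-Z0-9_]+_?$", name) is not None

def should_flag_constant_name(var):
    """Flag badly named constants, but skip the ones that are part of the public API."""
    if not var.is_constant:
        return False
    if var.name in ("symbol", "name", "decimals"):  # existing ERC20 exception
        return False
    if var.visibility in ("public", "external"):  # exported as getter functions
        return False
    return not is_upper_case_with_underscores(var.name)

print(should_flag_constant_name(Var("myVal", "public", True)))    # False: public getter, skipped
print(should_flag_constant_name(Var("myVal", "internal", True)))  # True: still reported
```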
</issue>
<code>
[start of slither/detectors/naming_convention/naming_convention.py]
1 import re
2 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
3 from slither.formatters.naming_convention.naming_convention import custom_format
4
5
6 class NamingConvention(AbstractDetector):
7 """
8 Check if naming conventions are followed
9 https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions
10
11 Exceptions:
12 - Allow constant variables name/symbol/decimals to be lowercase (ERC20)
13 - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters
14 - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'
15 """
16
17 ARGUMENT = "naming-convention"
18 HELP = "Conformity to Solidity naming conventions"
19 IMPACT = DetectorClassification.INFORMATIONAL
20 CONFIDENCE = DetectorClassification.HIGH
21
22 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions"
23
24 WIKI_TITLE = "Conformance to Solidity naming conventions"
25
26 # region wiki_description
27 WIKI_DESCRIPTION = """
28 Solidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.
29 #### Rule exceptions
30 - Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).
31 - Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters."""
32 # endregion wiki_description
33
34 WIKI_RECOMMENDATION = "Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions)."
35
36 STANDARD_JSON = False
37
38 @staticmethod
39 def is_cap_words(name):
40 return re.search("^[A-Z]([A-Za-z0-9]+)?_?$", name) is not None
41
42 @staticmethod
43 def is_mixed_case(name):
44 return re.search("^[a-z]([A-Za-z0-9]+)?_?$", name) is not None
45
46 @staticmethod
47 def is_mixed_case_with_underscore(name):
48 # Allow _ at the beginning to represent private variable
49 # or unused parameters
50 return re.search("^[_]?[a-z]([A-Za-z0-9]+)?_?$", name) is not None
51
52 @staticmethod
53 def is_upper_case_with_underscores(name):
54 return re.search("^[A-Z0-9_]+_?$", name) is not None
55
56 @staticmethod
57 def should_avoid_name(name):
58 return re.search("^[lOI]$", name) is not None
59
60 def _detect(self): # pylint: disable=too-many-branches,too-many-statements
61
62 results = []
63 for contract in self.contracts:
64
65 if not self.is_cap_words(contract.name):
66 info = ["Contract ", contract, " is not in CapWords\n"]
67
68 res = self.generate_result(info)
69 res.add(contract, {"target": "contract", "convention": "CapWords"})
70 results.append(res)
71
72 for struct in contract.structures_declared:
73 if not self.is_cap_words(struct.name):
74 info = ["Struct ", struct, " is not in CapWords\n"]
75
76 res = self.generate_result(info)
77 res.add(struct, {"target": "structure", "convention": "CapWords"})
78 results.append(res)
79
80 for event in contract.events_declared:
81 if not self.is_cap_words(event.name):
82 info = ["Event ", event, " is not in CapWords\n"]
83
84 res = self.generate_result(info)
85 res.add(event, {"target": "event", "convention": "CapWords"})
86 results.append(res)
87
88 for func in contract.functions_declared:
89 if func.is_constructor:
90 continue
91 if not self.is_mixed_case(func.name):
92 if func.visibility in [
93 "internal",
94 "private",
95 ] and self.is_mixed_case_with_underscore(func.name):
96 continue
97 if func.name.startswith(("echidna_", "crytic_")):
98 continue
99 info = ["Function ", func, " is not in mixedCase\n"]
100
101 res = self.generate_result(info)
102 res.add(func, {"target": "function", "convention": "mixedCase"})
103 results.append(res)
104
105 for argument in func.parameters:
106 # Ignore parameter names that are not specified i.e. empty strings
107 if argument.name == "":
108 continue
109 if argument in func.variables_read_or_written:
110 correct_naming = self.is_mixed_case(argument.name)
111 else:
112 correct_naming = self.is_mixed_case_with_underscore(argument.name)
113 if not correct_naming:
114 info = ["Parameter ", argument, " is not in mixedCase\n"]
115
116 res = self.generate_result(info)
117 res.add(argument, {"target": "parameter", "convention": "mixedCase"})
118 results.append(res)
119
120 for var in contract.state_variables_declared:
121 if self.should_avoid_name(var.name):
122 info = [
123 "Variable ",
124 var,
125 " is single letter l, O, or I, which should not be used\n",
126 ]
127
128 res = self.generate_result(info)
129 res.add(
130 var,
131 {
132 "target": "variable",
133 "convention": "l_O_I_should_not_be_used",
134 },
135 )
136 results.append(res)
137
138 if var.is_constant is True:
139 # For ERC20 compatibility
140 if var.name in ["symbol", "name", "decimals"]:
141 continue
142
143 if not self.is_upper_case_with_underscores(var.name):
144 info = [
145 "Constant ",
146 var,
147 " is not in UPPER_CASE_WITH_UNDERSCORES\n",
148 ]
149
150 res = self.generate_result(info)
151 res.add(
152 var,
153 {
154 "target": "variable_constant",
155 "convention": "UPPER_CASE_WITH_UNDERSCORES",
156 },
157 )
158 results.append(res)
159
160 else:
161 if var.visibility == "private":
162 correct_naming = self.is_mixed_case_with_underscore(var.name)
163 else:
164 correct_naming = self.is_mixed_case(var.name)
165 if not correct_naming:
166 info = ["Variable ", var, " is not in mixedCase\n"]
167
168 res = self.generate_result(info)
169 res.add(var, {"target": "variable", "convention": "mixedCase"})
170 results.append(res)
171
172 for enum in contract.enums_declared:
173 if not self.is_cap_words(enum.name):
174 info = ["Enum ", enum, " is not in CapWords\n"]
175
176 res = self.generate_result(info)
177 res.add(enum, {"target": "enum", "convention": "CapWords"})
178 results.append(res)
179
180 for modifier in contract.modifiers_declared:
181 if not self.is_mixed_case(modifier.name):
182 info = ["Modifier ", modifier, " is not in mixedCase\n"]
183
184 res = self.generate_result(info)
185 res.add(modifier, {"target": "modifier", "convention": "mixedCase"})
186 results.append(res)
187
188 return results
189
190 @staticmethod
191 def _format(slither, result):
192 custom_format(slither, result)
193
[end of slither/detectors/naming_convention/naming_convention.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/slither/detectors/naming_convention/naming_convention.py b/slither/detectors/naming_convention/naming_convention.py
--- a/slither/detectors/naming_convention/naming_convention.py
+++ b/slither/detectors/naming_convention/naming_convention.py
@@ -139,7 +139,8 @@
# For ERC20 compatibility
if var.name in ["symbol", "name", "decimals"]:
continue
-
+ if var.visibility == "public":
+ continue
if not self.is_upper_case_with_underscores(var.name):
info = [
"Constant ",
|
{"golden_diff": "diff --git a/slither/detectors/naming_convention/naming_convention.py b/slither/detectors/naming_convention/naming_convention.py\n--- a/slither/detectors/naming_convention/naming_convention.py\n+++ b/slither/detectors/naming_convention/naming_convention.py\n@@ -139,7 +139,8 @@\n # For ERC20 compatibility\n if var.name in [\"symbol\", \"name\", \"decimals\"]:\n continue\n-\n+ if var.visibility == \"public\":\n+ continue\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Constant \",\n", "issue": "\"not in UPPER_CASE_WITH_UNDERSCORES\" should not apply to public/external constants\n### Describe the issue:\r\n\r\n`public`/`external` fields of a contract are exported as functions, so they are part of a public API, and Slither should not report the warning `not in UPPER_CASE_WITH_UNDERSCORES` if these fields are in lowercase.\r\n\r\n### Code example to reproduce the issue:\r\n\r\n```\r\ncontract A { uint256 public myVal; }\r\n```\r\n\r\n### Version:\r\n\r\n0.8.3\r\n\r\n### Relevant log output:\r\n\r\n```shell\r\nConstant A.myVal (contracts/A.sol#1) is not in UPPER_CASE_WITH_UNDERSCORES\r\n```\r\n```\r\n\n", "before_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.naming_convention.naming_convention import custom_format\n\n\nclass NamingConvention(AbstractDetector):\n \"\"\"\n Check if naming conventions are followed\n https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions\n\n Exceptions:\n - Allow constant variables name/symbol/decimals to be lowercase (ERC20)\n - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters\n - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'\n \"\"\"\n\n ARGUMENT = \"naming-convention\"\n HELP = \"Conformity to Solidity naming conventions\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions\"\n\n WIKI_TITLE = \"Conformance to Solidity naming conventions\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"\nSolidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.\n#### Rule exceptions\n- Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).\n- Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters.\"\"\"\n # endregion wiki_description\n\n WIKI_RECOMMENDATION = \"Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions).\"\n\n STANDARD_JSON = False\n\n @staticmethod\n def is_cap_words(name):\n return re.search(\"^[A-Z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case(name):\n return re.search(\"^[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case_with_underscore(name):\n # Allow _ at the beginning to represent private variable\n # or unused parameters\n return re.search(\"^[_]?[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_upper_case_with_underscores(name):\n return re.search(\"^[A-Z0-9_]+_?$\", name) is not None\n\n @staticmethod\n def should_avoid_name(name):\n return re.search(\"^[lOI]$\", name) is not None\n\n def _detect(self): # 
pylint: disable=too-many-branches,too-many-statements\n\n results = []\n for contract in self.contracts:\n\n if not self.is_cap_words(contract.name):\n info = [\"Contract \", contract, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(contract, {\"target\": \"contract\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for struct in contract.structures_declared:\n if not self.is_cap_words(struct.name):\n info = [\"Struct \", struct, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(struct, {\"target\": \"structure\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for event in contract.events_declared:\n if not self.is_cap_words(event.name):\n info = [\"Event \", event, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(event, {\"target\": \"event\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for func in contract.functions_declared:\n if func.is_constructor:\n continue\n if not self.is_mixed_case(func.name):\n if func.visibility in [\n \"internal\",\n \"private\",\n ] and self.is_mixed_case_with_underscore(func.name):\n continue\n if func.name.startswith((\"echidna_\", \"crytic_\")):\n continue\n info = [\"Function \", func, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(func, {\"target\": \"function\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for argument in func.parameters:\n # Ignore parameter names that are not specified i.e. empty strings\n if argument.name == \"\":\n continue\n if argument in func.variables_read_or_written:\n correct_naming = self.is_mixed_case(argument.name)\n else:\n correct_naming = self.is_mixed_case_with_underscore(argument.name)\n if not correct_naming:\n info = [\"Parameter \", argument, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(argument, {\"target\": \"parameter\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for var in contract.state_variables_declared:\n if self.should_avoid_name(var.name):\n info = [\n \"Variable \",\n var,\n \" is single letter l, O, or I, which should not be used\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable\",\n \"convention\": \"l_O_I_should_not_be_used\",\n },\n )\n results.append(res)\n\n if var.is_constant is True:\n # For ERC20 compatibility\n if var.name in [\"symbol\", \"name\", \"decimals\"]:\n continue\n\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Constant \",\n var,\n \" is not in UPPER_CASE_WITH_UNDERSCORES\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable_constant\",\n \"convention\": \"UPPER_CASE_WITH_UNDERSCORES\",\n },\n )\n results.append(res)\n\n else:\n if var.visibility == \"private\":\n correct_naming = self.is_mixed_case_with_underscore(var.name)\n else:\n correct_naming = self.is_mixed_case(var.name)\n if not correct_naming:\n info = [\"Variable \", var, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(var, {\"target\": \"variable\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for enum in contract.enums_declared:\n if not self.is_cap_words(enum.name):\n info = [\"Enum \", enum, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(enum, {\"target\": \"enum\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for modifier in contract.modifiers_declared:\n if not self.is_mixed_case(modifier.name):\n info = [\"Modifier \", modifier, \" is 
not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(modifier, {\"target\": \"modifier\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n custom_format(slither, result)\n", "path": "slither/detectors/naming_convention/naming_convention.py"}]}
| 2,772 | 144 |
gh_patches_debug_27866
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-1247
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Segfault when exiting Command Line Usage Example
### Short description
When running the command-line example and closing the window, I get a segmentation fault.
### Code to reproduce
```bash
python -m pyqtgraph.examples
```
Run the command line usage example, and close out the window.
### Expected behavior
Window closes
### Real behavior
Window closes, generates a segfault
```
/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py:125: RuntimeWarning: 'pyqtgraph.examples.__main__' found in sys.modules after import of package 'pyqtgraph.examples', but prior to execution of 'pyqtgraph.examples.__main__'; this may result in unpredictable behaviour
warn(RuntimeWarning(msg))
qt.qpa.fonts: Populating font family aliases took 246 ms. Replace uses of missing font family "FreeMono" with one that exists to avoid this cost.
Using PyQt5 (default graphics system)
Fatal Python error: Segmentation fault
Current thread 0x000000010840bdc0 (most recent call first):
File "/Users/ognyan/Developer/pyqtgraph/.venv/lib/python3.7/site-packages/pyqtgraph/examples/CLIexample.py", line 26 in <module>
```
### Tested environment(s)
* PyQtGraph version: 0.11
* Qt Python binding: PySide2 5.15.0 and PyQt5 5.15.0
* Python version: 3.7.7
* NumPy version: 1.18.4
* Operating system: macOS Catalina
* Installation method: pip
### Additional context
This is not an issue with 0.11.rc0
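
For reference, the example reduces to roughly the following (paraphrased from `CLIexample.py`, not a verbatim copy); the reported segfault occurs when the plot window returned by `pg.plot()` is closed:

```python
# Paraphrased reduction of examples/CLIexample.py (assumed shape, not verbatim).
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui

data = np.random.normal(size=1000)
win = pg.plot(data, title="Simplest possible plotting example")  # returns a PlotWindow

if __name__ == '__main__':
    QtGui.QApplication.instance().exec_()  # the crash is seen when the window is closed after this
```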
</issue>
<code>
[start of pyqtgraph/graphicsWindows.py]
1 # -*- coding: utf-8 -*-
2 """
3 DEPRECATED: The classes below are convenience classes that create a new window
4 containting a single, specific widget. These classes are now unnecessary because
5 it is possible to place any widget into its own window by simply calling its
6 show() method.
7 """
8
9 from .Qt import QtCore, QtGui, mkQApp
10 from .widgets.PlotWidget import *
11 from .imageview import *
12 from .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget
13 from .widgets.GraphicsView import GraphicsView
14
15
16 class GraphicsWindow(GraphicsLayoutWidget):
17 """
18 (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)
19
20 Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. This class
21 is intended for use from the interactive python prompt.
22 """
23 def __init__(self, title=None, size=(800,600), **kargs):
24 mkQApp()
25 GraphicsLayoutWidget.__init__(self, **kargs)
26 self.resize(*size)
27 if title is not None:
28 self.setWindowTitle(title)
29 self.show()
30
31
32 class TabWindow(QtGui.QMainWindow):
33 """
34 (deprecated)
35 """
36 def __init__(self, title=None, size=(800,600)):
37 mkQApp()
38 QtGui.QMainWindow.__init__(self)
39 self.resize(*size)
40 self.cw = QtGui.QTabWidget()
41 self.setCentralWidget(self.cw)
42 if title is not None:
43 self.setWindowTitle(title)
44 self.show()
45
46 def __getattr__(self, attr):
47 return getattr(self.cw, attr)
48
49
50 class PlotWindow(PlotWidget):
51 sigClosed = QtCore.Signal(object)
52
53 """
54 (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)
55 """
56 def __init__(self, title=None, **kargs):
57 mkQApp()
58 PlotWidget.__init__(self, **kargs)
59 if title is not None:
60 self.setWindowTitle(title)
61 self.show()
62
63 def closeEvent(self, event):
64 PlotWidget.closeEvent(self, event)
65 self.sigClosed.emit(self)
66
67
68 class ImageWindow(ImageView):
69 sigClosed = QtCore.Signal(object)
70
71 """
72 (deprecated; use :class:`~pyqtgraph.ImageView` instead)
73 """
74 def __init__(self, *args, **kargs):
75 mkQApp()
76 ImageView.__init__(self)
77 if 'title' in kargs:
78 self.setWindowTitle(kargs['title'])
79 del kargs['title']
80 if len(args) > 0 or len(kargs) > 0:
81 self.setImage(*args, **kargs)
82 self.show()
83
84 def closeEvent(self, event):
85 ImageView.closeEvent(self, event)
86 self.sigClosed.emit(self)
87
[end of pyqtgraph/graphicsWindows.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py
--- a/pyqtgraph/graphicsWindows.py
+++ b/pyqtgraph/graphicsWindows.py
@@ -55,10 +55,14 @@
"""
def __init__(self, title=None, **kargs):
mkQApp()
+ self.win = QtGui.QMainWindow()
PlotWidget.__init__(self, **kargs)
+ self.win.setCentralWidget(self)
+ for m in ['resize']:
+ setattr(self, m, getattr(self.win, m))
if title is not None:
- self.setWindowTitle(title)
- self.show()
+ self.win.setWindowTitle(title)
+ self.win.show()
def closeEvent(self, event):
PlotWidget.closeEvent(self, event)
@@ -73,14 +77,20 @@
"""
def __init__(self, *args, **kargs):
mkQApp()
- ImageView.__init__(self)
+ self.win = QtGui.QMainWindow()
+ self.win.resize(800,600)
if 'title' in kargs:
- self.setWindowTitle(kargs['title'])
+ self.win.setWindowTitle(kargs['title'])
del kargs['title']
+ ImageView.__init__(self, self.win)
if len(args) > 0 or len(kargs) > 0:
self.setImage(*args, **kargs)
- self.show()
-
+
+ self.win.setCentralWidget(self)
+ for m in ['resize']:
+ setattr(self, m, getattr(self.win, m))
+ self.win.show()
+
def closeEvent(self, event):
ImageView.closeEvent(self, event)
self.sigClosed.emit(self)
|
{"golden_diff": "diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py\n--- a/pyqtgraph/graphicsWindows.py\n+++ b/pyqtgraph/graphicsWindows.py\n@@ -55,10 +55,14 @@\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n+ self.win = QtGui.QMainWindow()\n PlotWidget.__init__(self, **kargs)\n+ self.win.setCentralWidget(self)\n+ for m in ['resize']:\n+ setattr(self, m, getattr(self.win, m))\n if title is not None:\n- self.setWindowTitle(title)\n- self.show()\n+ self.win.setWindowTitle(title)\n+ self.win.show()\n \n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n@@ -73,14 +77,20 @@\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n- ImageView.__init__(self)\n+ self.win = QtGui.QMainWindow()\n+ self.win.resize(800,600)\n if 'title' in kargs:\n- self.setWindowTitle(kargs['title'])\n+ self.win.setWindowTitle(kargs['title'])\n del kargs['title']\n+ ImageView.__init__(self, self.win)\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n- self.show()\n-\n+ \n+ self.win.setCentralWidget(self)\n+ for m in ['resize']:\n+ setattr(self, m, getattr(self.win, m))\n+ self.win.show()\n+ \n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "issue": "Segfault when exiting Command Line Usage Example\n<!-- In the following, please describe your issue in detail! -->\r\n<!-- If some of the sections do not apply, just remove them. -->\r\n\r\n### Short description\r\nWhen Running the command line example, and closing the window, I get a segmentation fault\r\n\r\n### Code to reproduce\r\n<!-- Please provide a minimal working example that reproduces the issue in the code block below.\r\n Ideally, this should be a full example someone else could run without additional setup. -->\r\n```bash\r\npython -m pyqtgraph.examples\r\n```\r\n\r\nRun the command line usage example, and close out the window.\r\n\r\n\r\n### Expected behavior\r\nWindow closes\r\n\r\n### Real behavior\r\nWindow closes, generates a segfault\r\n\r\n```\r\n/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py:125: RuntimeWarning: 'pyqtgraph.examples.__main__' found in sys.modules after import of package 'pyqtgraph.examples', but prior to execution of 'pyqtgraph.examples.__main__'; this may result in unpredictable behaviour\r\n warn(RuntimeWarning(msg))\r\nqt.qpa.fonts: Populating font family aliases took 246 ms. Replace uses of missing font family \"FreeMono\" with one that exists to avoid this cost.\r\nUsing PyQt5 (default graphics system)\r\nFatal Python error: Segmentation fault\r\n\r\nCurrent thread 0x000000010840bdc0 (most recent call first):\r\n File \"/Users/ognyan/Developer/pyqtgraph/.venv/lib/python3.7/site-packages/pyqtgraph/examples/CLIexample.py\", line 26 in <module>\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.11\r\n * Qt Python binding: PySide2 5.15.0 and PyQt5 5.15.0\r\n * Python version: 3.7.7\r\n * NumPy version: 1.18.4\r\n * Operating system: macOS Catalina\r\n * Installation method: pip\r\n\r\n### Additional context\r\n\r\nThis is not an issue with 0.11.rc0\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDEPRECATED: The classes below are convenience classes that create a new window\ncontainting a single, specific widget. 
These classes are now unnecessary because\nit is possible to place any widget into its own window by simply calling its\nshow() method.\n\"\"\"\n\nfrom .Qt import QtCore, QtGui, mkQApp\nfrom .widgets.PlotWidget import *\nfrom .imageview import *\nfrom .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget\nfrom .widgets.GraphicsView import GraphicsView\n\n\nclass GraphicsWindow(GraphicsLayoutWidget):\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)\n \n Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. This class\n is intended for use from the interactive python prompt.\n \"\"\"\n def __init__(self, title=None, size=(800,600), **kargs):\n mkQApp()\n GraphicsLayoutWidget.__init__(self, **kargs)\n self.resize(*size)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n\nclass TabWindow(QtGui.QMainWindow):\n \"\"\"\n (deprecated)\n \"\"\"\n def __init__(self, title=None, size=(800,600)):\n mkQApp()\n QtGui.QMainWindow.__init__(self)\n self.resize(*size)\n self.cw = QtGui.QTabWidget()\n self.setCentralWidget(self.cw)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n def __getattr__(self, attr):\n return getattr(self.cw, attr)\n \n\nclass PlotWindow(PlotWidget):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n PlotWidget.__init__(self, **kargs)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n\n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n self.sigClosed.emit(self)\n\n\nclass ImageWindow(ImageView):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.ImageView` instead)\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n ImageView.__init__(self)\n if 'title' in kargs:\n self.setWindowTitle(kargs['title'])\n del kargs['title']\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n self.show()\n\n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "path": "pyqtgraph/graphicsWindows.py"}]}
| 1,754 | 383 |
gh_patches_debug_15978
|
rasdani/github-patches
|
git_diff
|
NVIDIA-Merlin__NVTabular-455
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEA] Warn on parquet row group sizes out of recommended bounds
**Is your feature request related to a problem? Please describe.**
We should warn on parquet files that contain row groups bigger than recommended - with actionable links and information for our customers
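
For context, row-group sizes can be read from the parquet footer without loading any data. A rough sketch of such a check follows; the 128 MB bound and the pyarrow-based approach are assumptions for illustration, not existing NVTabular code:

```python
# Sketch only: inspect row-group sizes with pyarrow and warn on large ones.
import warnings
import pyarrow.parquet as pq

RECOMMENDED_MAX_BYTES = 128 * 1024 ** 2  # assumed 128 MB guideline

metadata = pq.ParquetFile("part.0.parquet").metadata
for i in range(metadata.num_row_groups):
    size = metadata.row_group(i).total_byte_size
    if size > RECOMMENDED_MAX_BYTES:
        warnings.warn(
            f"Row group {i} is {size} bytes; consider rewriting the file "
            "with smaller row groups."
        )
```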
</issue>
<code>
[start of nvtabular/io/parquet.py]
1 #
2 # Copyright (c) 2020, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 import functools
17 import logging
18 import os
19 import threading
20 import warnings
21 from io import BytesIO
22 from uuid import uuid4
23
24 import cudf
25 import dask_cudf
26 from cudf.io.parquet import ParquetWriter as pwriter
27 from dask.utils import natural_sort_key
28 from pyarrow import parquet as pq
29
30 from .dataset_engine import DatasetEngine
31 from .shuffle import Shuffle, _shuffle_gdf
32 from .writer import ThreadedWriter
33
34 LOG = logging.getLogger("nvtabular")
35
36
37 class ParquetDatasetEngine(DatasetEngine):
38 """ParquetDatasetEngine is a Dask-based version of cudf.read_parquet."""
39
40 def __init__(
41 self,
42 paths,
43 part_size,
44 storage_options,
45 row_groups_per_part=None,
46 legacy=False,
47 batch_size=None, # Ignored
48 ):
49 super().__init__(paths, part_size, storage_options)
50 if row_groups_per_part is None:
51 path0 = self._dataset.pieces[0].path
52 rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(path0, row_groups=0, row_group=0))
53 row_groups_per_part = self.part_size / rg_byte_size_0
54 if row_groups_per_part < 1.0:
55 warnings.warn(
56 f"Row group size {rg_byte_size_0} is bigger than requested part_size "
57 f"{self.part_size}"
58 )
59 row_groups_per_part = 1.0
60
61 self.row_groups_per_part = int(row_groups_per_part)
62
63 assert self.row_groups_per_part > 0
64
65 @property
66 @functools.lru_cache(1)
67 def _dataset(self):
68 paths = self.paths
69 fs = self.fs
70 if len(paths) > 1:
71 # This is a list of files
72 dataset = pq.ParquetDataset(paths, filesystem=fs, validate_schema=False)
73 elif fs.isdir(paths[0]):
74 # This is a directory
75 dataset = pq.ParquetDataset(paths[0], filesystem=fs, validate_schema=False)
76 else:
77 # This is a single file
78 dataset = pq.ParquetDataset(paths[0], filesystem=fs)
79 return dataset
80
81 @property
82 @functools.lru_cache(1)
83 def num_rows(self):
84 # TODO: Avoid parsing metadata here if we can confirm upstream dask
85 # can get the length efficiently (in all practical cases)
86 dataset = self._dataset
87 if dataset.metadata:
88 # We have a metadata file
89 return dataset.metadata.num_rows
90 else:
91 # Sum up row-group sizes manually
92 num_rows = 0
93 for piece in dataset.pieces:
94 num_rows += piece.get_metadata().num_rows
95 return num_rows
96
97 def to_ddf(self, columns=None):
98 return dask_cudf.read_parquet(
99 self.paths,
100 columns=columns,
101 # can't omit reading the index in if we aren't being passed columns
102 index=None if columns is None else False,
103 gather_statistics=False,
104 split_row_groups=self.row_groups_per_part,
105 storage_options=self.storage_options,
106 )
107
108
109 class ParquetWriter(ThreadedWriter):
110 def __init__(self, out_dir, **kwargs):
111 super().__init__(out_dir, **kwargs)
112 self.data_paths = []
113 self.data_writers = []
114 self.data_bios = []
115 self._lock = threading.RLock()
116
117 def _get_filename(self, i):
118 if self.use_guid:
119 fn = f"{i}.{guid()}.parquet"
120 else:
121 fn = f"{i}.parquet"
122
123 return os.path.join(self.out_dir, fn)
124
125 def _get_or_create_writer(self, idx):
126 # lazily initializes a writer for the given index
127 with self._lock:
128 while len(self.data_writers) <= idx:
129 path = self._get_filename(len(self.data_writers))
130 self.data_paths.append(path)
131 if self.bytes_io:
132 bio = BytesIO()
133 self.data_bios.append(bio)
134 self.data_writers.append(pwriter(bio, compression=None))
135 else:
136 self.data_writers.append(pwriter(path, compression=None))
137
138 return self.data_writers[idx]
139
140 def _write_table(self, idx, data, has_list_column=False):
141 if has_list_column:
142 # currently cudf doesn't support chunked parquet writers with list columns
143 # write out a new file, rather than stream multiple chunks to a single file
144 filename = self._get_filename(len(self.data_paths))
145 data.to_parquet(filename)
146 self.data_paths.append(filename)
147 else:
148 writer = self._get_or_create_writer(idx)
149 writer.write_table(data)
150
151 def _write_thread(self):
152 while True:
153 item = self.queue.get()
154 try:
155 if item is self._eod:
156 break
157 idx, data = item
158 with self.write_locks[idx]:
159 self._write_table(idx, data, False)
160 finally:
161 self.queue.task_done()
162
163 @classmethod
164 def write_special_metadata(cls, md, fs, out_dir):
165 # Sort metadata by file name and convert list of
166 # tuples to a list of metadata byte-blobs
167 md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]
168
169 # Aggregate metadata and write _metadata file
170 _write_pq_metadata_file(md_list, fs, out_dir)
171
172 def _close_writers(self):
173 md_dict = {}
174 for writer, path in zip(self.data_writers, self.data_paths):
175 fn = path.split(self.fs.sep)[-1]
176 md_dict[fn] = writer.close(metadata_file_path=fn)
177 return md_dict
178
179 def _bytesio_to_disk(self):
180 for bio, path in zip(self.data_bios, self.data_paths):
181 gdf = cudf.io.read_parquet(bio, index=False)
182 bio.close()
183 if self.shuffle == Shuffle.PER_WORKER:
184 gdf = _shuffle_gdf(gdf)
185 gdf.to_parquet(path, compression=None, index=False)
186 return
187
188
189 def _write_pq_metadata_file(md_list, fs, path):
190 """ Converts list of parquet metadata objects into a single shared _metadata file. """
191 if md_list:
192 metadata_path = fs.sep.join([path, "_metadata"])
193 _meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]
194 with fs.open(metadata_path, "wb") as fil:
195 _meta.tofile(fil)
196 return
197
198
199 def guid():
200 """Simple utility function to get random hex string"""
201 return uuid4().hex
202
203
204 def _memory_usage(df):
205 """this function is a workaround of a problem with getting memory usage of lists
206 in cudf0.16. This can be deleted and just use `df.memory_usage(deep= True, index=True).sum()`
207 once we are using cudf 0.17 (fixed in https://github.com/rapidsai/cudf/pull/6549)"""
208 size = 0
209 for col in df._data.columns:
210 if cudf.utils.dtypes.is_list_dtype(col.dtype):
211 for child in col.base_children:
212 size += child.__sizeof__()
213 else:
214 size += col._memory_usage(deep=True)
215 size += df.index.memory_usage(deep=True)
216 return size
217
[end of nvtabular/io/parquet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nvtabular/io/parquet.py b/nvtabular/io/parquet.py
--- a/nvtabular/io/parquet.py
+++ b/nvtabular/io/parquet.py
@@ -53,8 +53,12 @@
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
- f"Row group size {rg_byte_size_0} is bigger than requested part_size "
- f"{self.part_size}"
+ f"Row group memory size ({rg_byte_size_0}) (bytes) of parquet file is bigger"
+ f" than requested part_size ({self.part_size}) for the NVTabular dataset."
+ f"A row group memory size of 128 MB is generally recommended. You can find"
+ f" info on how to set the row group size of parquet files in "
+ f"https://nvidia.github.io/NVTabular/main/HowItWorks.html"
+ f"#getting-your-data-ready-for-nvtabular"
)
row_groups_per_part = 1.0
|
{"golden_diff": "diff --git a/nvtabular/io/parquet.py b/nvtabular/io/parquet.py\n--- a/nvtabular/io/parquet.py\n+++ b/nvtabular/io/parquet.py\n@@ -53,8 +53,12 @@\n row_groups_per_part = self.part_size / rg_byte_size_0\n if row_groups_per_part < 1.0:\n warnings.warn(\n- f\"Row group size {rg_byte_size_0} is bigger than requested part_size \"\n- f\"{self.part_size}\"\n+ f\"Row group memory size ({rg_byte_size_0}) (bytes) of parquet file is bigger\"\n+ f\" than requested part_size ({self.part_size}) for the NVTabular dataset.\"\n+ f\"A row group memory size of 128 MB is generally recommended. You can find\"\n+ f\" info on how to set the row group size of parquet files in \"\n+ f\"https://nvidia.github.io/NVTabular/main/HowItWorks.html\"\n+ f\"#getting-your-data-ready-for-nvtabular\"\n )\n row_groups_per_part = 1.0\n", "issue": "[FEA] Warn on parquet row group sizes out of recommended bounds\n**Is your feature request related to a problem? Please describe.**\r\nWe should warn on parquet files that contain row groups bigger than recommended - with actionable links and information for our customers\r\n\n", "before_files": [{"content": "#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport functools\nimport logging\nimport os\nimport threading\nimport warnings\nfrom io import BytesIO\nfrom uuid import uuid4\n\nimport cudf\nimport dask_cudf\nfrom cudf.io.parquet import ParquetWriter as pwriter\nfrom dask.utils import natural_sort_key\nfrom pyarrow import parquet as pq\n\nfrom .dataset_engine import DatasetEngine\nfrom .shuffle import Shuffle, _shuffle_gdf\nfrom .writer import ThreadedWriter\n\nLOG = logging.getLogger(\"nvtabular\")\n\n\nclass ParquetDatasetEngine(DatasetEngine):\n \"\"\"ParquetDatasetEngine is a Dask-based version of cudf.read_parquet.\"\"\"\n\n def __init__(\n self,\n paths,\n part_size,\n storage_options,\n row_groups_per_part=None,\n legacy=False,\n batch_size=None, # Ignored\n ):\n super().__init__(paths, part_size, storage_options)\n if row_groups_per_part is None:\n path0 = self._dataset.pieces[0].path\n rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(path0, row_groups=0, row_group=0))\n row_groups_per_part = self.part_size / rg_byte_size_0\n if row_groups_per_part < 1.0:\n warnings.warn(\n f\"Row group size {rg_byte_size_0} is bigger than requested part_size \"\n f\"{self.part_size}\"\n )\n row_groups_per_part = 1.0\n\n self.row_groups_per_part = int(row_groups_per_part)\n\n assert self.row_groups_per_part > 0\n\n @property\n @functools.lru_cache(1)\n def _dataset(self):\n paths = self.paths\n fs = self.fs\n if len(paths) > 1:\n # This is a list of files\n dataset = pq.ParquetDataset(paths, filesystem=fs, validate_schema=False)\n elif fs.isdir(paths[0]):\n # This is a directory\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, validate_schema=False)\n else:\n # This is a single file\n dataset = pq.ParquetDataset(paths[0], filesystem=fs)\n return dataset\n\n @property\n @functools.lru_cache(1)\n def num_rows(self):\n 
# TODO: Avoid parsing metadata here if we can confirm upstream dask\n # can get the length efficiently (in all practical cases)\n dataset = self._dataset\n if dataset.metadata:\n # We have a metadata file\n return dataset.metadata.num_rows\n else:\n # Sum up row-group sizes manually\n num_rows = 0\n for piece in dataset.pieces:\n num_rows += piece.get_metadata().num_rows\n return num_rows\n\n def to_ddf(self, columns=None):\n return dask_cudf.read_parquet(\n self.paths,\n columns=columns,\n # can't omit reading the index in if we aren't being passed columns\n index=None if columns is None else False,\n gather_statistics=False,\n split_row_groups=self.row_groups_per_part,\n storage_options=self.storage_options,\n )\n\n\nclass ParquetWriter(ThreadedWriter):\n def __init__(self, out_dir, **kwargs):\n super().__init__(out_dir, **kwargs)\n self.data_paths = []\n self.data_writers = []\n self.data_bios = []\n self._lock = threading.RLock()\n\n def _get_filename(self, i):\n if self.use_guid:\n fn = f\"{i}.{guid()}.parquet\"\n else:\n fn = f\"{i}.parquet\"\n\n return os.path.join(self.out_dir, fn)\n\n def _get_or_create_writer(self, idx):\n # lazily initializes a writer for the given index\n with self._lock:\n while len(self.data_writers) <= idx:\n path = self._get_filename(len(self.data_writers))\n self.data_paths.append(path)\n if self.bytes_io:\n bio = BytesIO()\n self.data_bios.append(bio)\n self.data_writers.append(pwriter(bio, compression=None))\n else:\n self.data_writers.append(pwriter(path, compression=None))\n\n return self.data_writers[idx]\n\n def _write_table(self, idx, data, has_list_column=False):\n if has_list_column:\n # currently cudf doesn't support chunked parquet writers with list columns\n # write out a new file, rather than stream multiple chunks to a single file\n filename = self._get_filename(len(self.data_paths))\n data.to_parquet(filename)\n self.data_paths.append(filename)\n else:\n writer = self._get_or_create_writer(idx)\n writer.write_table(data)\n\n def _write_thread(self):\n while True:\n item = self.queue.get()\n try:\n if item is self._eod:\n break\n idx, data = item\n with self.write_locks[idx]:\n self._write_table(idx, data, False)\n finally:\n self.queue.task_done()\n\n @classmethod\n def write_special_metadata(cls, md, fs, out_dir):\n # Sort metadata by file name and convert list of\n # tuples to a list of metadata byte-blobs\n md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]\n\n # Aggregate metadata and write _metadata file\n _write_pq_metadata_file(md_list, fs, out_dir)\n\n def _close_writers(self):\n md_dict = {}\n for writer, path in zip(self.data_writers, self.data_paths):\n fn = path.split(self.fs.sep)[-1]\n md_dict[fn] = writer.close(metadata_file_path=fn)\n return md_dict\n\n def _bytesio_to_disk(self):\n for bio, path in zip(self.data_bios, self.data_paths):\n gdf = cudf.io.read_parquet(bio, index=False)\n bio.close()\n if self.shuffle == Shuffle.PER_WORKER:\n gdf = _shuffle_gdf(gdf)\n gdf.to_parquet(path, compression=None, index=False)\n return\n\n\ndef _write_pq_metadata_file(md_list, fs, path):\n \"\"\" Converts list of parquet metadata objects into a single shared _metadata file. 
\"\"\"\n if md_list:\n metadata_path = fs.sep.join([path, \"_metadata\"])\n _meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]\n with fs.open(metadata_path, \"wb\") as fil:\n _meta.tofile(fil)\n return\n\n\ndef guid():\n \"\"\"Simple utility function to get random hex string\"\"\"\n return uuid4().hex\n\n\ndef _memory_usage(df):\n \"\"\"this function is a workaround of a problem with getting memory usage of lists\n in cudf0.16. This can be deleted and just use `df.memory_usage(deep= True, index=True).sum()`\n once we are using cudf 0.17 (fixed in https://github.com/rapidsai/cudf/pull/6549)\"\"\"\n size = 0\n for col in df._data.columns:\n if cudf.utils.dtypes.is_list_dtype(col.dtype):\n for child in col.base_children:\n size += child.__sizeof__()\n else:\n size += col._memory_usage(deep=True)\n size += df.index.memory_usage(deep=True)\n return size\n", "path": "nvtabular/io/parquet.py"}]}
| 2,902 | 255 |
gh_patches_debug_8470
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-338
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kinto start fails: no module named functools32
```
kinto start
Starting subprocess with file monitor
2015-11-29 10:12:24,821 INFO [venusian][MainThread] kinto 1.9.0 starting.
Traceback (most recent call last):
File "/var/www/kinto.leplat.re/venv/bin/kinto", line 9, in <module>
load_entry_point('kinto==1.9.0', 'console_scripts', 'kinto')()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__main__.py", line 55, in main
pserve.main(pserve_argv)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 60, in main
return command.run()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 366, in run
global_conf=vars)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 401, in loadapp
return loadapp(app_spec, name=name, relative_to=relative_to, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 247, in loadapp
return loadobj(APP, uri, name=name, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 272, in loadobj
return context.create()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 710, in create
return self.object_type.invoke(self)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 146, in invoke
return fix_call(context.object, context.global_conf, **context.local_conf)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/util.py", line 55, in fix_call
val = callable(*args, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__init__.py", line 77, in main
config.scan("kinto.views", **kwargs)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/config/__init__.py", line 974, in scan
ignore=ignore)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/venusian/__init__.py", line 205, in scan
__import__(modname)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/buckets.py", line 12, in <module>
from kinto.views.collections import Collection
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/collections.py", line 2, in <module>
import jsonschema
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/__init__.py", line 12, in <module>
from jsonschema.exceptions import (
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/exceptions.py", line 6, in <module>
from jsonschema import _utils
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/_utils.py", line 6, in <module>
from jsonschema.compat import str_types, MutableMapping, urlsplit
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/compat.py", line 39, in <module>
from functools32 import lru_cache
ImportError: No module named functools32
```
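
The traceback shows `jsonschema` importing the `functools32` backport on Python 2.7, which the requirements never declare. A sketch of one common way to declare such a Python-2-only dependency (illustrative only, not necessarily the project's actual fix):

```python
# setup.py sketch - conditionally require the backport on Python 2 only.
import sys

REQUIREMENTS = [
    'waitress',
    'cliquet>=2.13,<3',
    'jsonschema',
]

if sys.version_info < (3,):
    REQUIREMENTS.append('functools32')  # backport of functools.lru_cache
```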
</issue>
<code>
[start of setup.py]
1 import os
2 import codecs
3 from setuptools import setup, find_packages
4
5 here = os.path.abspath(os.path.dirname(__file__))
6
7
8 def read_file(filename):
9 """Open a related file and return its content."""
10 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
11 content = f.read()
12 return content
13
14 README = read_file('README.rst')
15 CHANGELOG = read_file('CHANGELOG.rst')
16 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
17
18 REQUIREMENTS = [
19 'waitress',
20 'cliquet>=2.13,<3',
21 'jsonschema',
22 ]
23
24 POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
25 'cliquet[postgresql]>=2.13,<3'
26 ]
27
28 MONITORING_REQUIREMENTS = REQUIREMENTS + [
29 'cliquet[monitoring]>=2.13,<3'
30 ]
31
32 FXA_REQUIREMENTS = REQUIREMENTS + [
33 'cliquet-fxa'
34 ]
35
36 ENTRY_POINTS = {
37 'paste.app_factory': [
38 'main = kinto:main',
39 ],
40 'console_scripts': [
41 'kinto = kinto.__main__:main'
42 ],
43 }
44
45 DEPENDENCY_LINKS = [
46 ]
47
48 setup(name='kinto',
49 version='1.11.0.dev0',
50 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
51 long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
52 license='Apache License (2.0)',
53 classifiers=[
54 "Programming Language :: Python",
55 "Programming Language :: Python :: 2",
56 "Programming Language :: Python :: 2.7",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.4",
59 "Programming Language :: Python :: 3.5",
60 "Programming Language :: Python :: Implementation :: CPython",
61 "Programming Language :: Python :: Implementation :: PyPy",
62 "Topic :: Internet :: WWW/HTTP",
63 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
64 "License :: OSI Approved :: Apache Software License"
65 ],
66 keywords="web services",
67 author='Mozilla Services',
68 author_email='[email protected]',
69 url='https://github.com/Kinto/kinto',
70 packages=find_packages(),
71 include_package_data=True,
72 zip_safe=False,
73 install_requires=REQUIREMENTS,
74 extras_require={
75 'postgresql': POSTGRESQL_REQUIREMENTS,
76 'monitoring': MONITORING_REQUIREMENTS,
77 'fxa': FXA_REQUIREMENTS,
78 },
79 entry_points=ENTRY_POINTS,
80 dependency_links=DEPENDENCY_LINKS)
81
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
-import os
import codecs
+import os
+import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
@@ -21,6 +22,11 @@
'jsonschema',
]
+if sys.version_info < (3,):
+ REQUIREMENTS.extend([
+ 'functools32', # not installed by jsonschema with old pip versions.
+ ])
+
POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
'cliquet[postgresql]>=2.13,<3'
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,6 @@\n-import os\n import codecs\n+import os\n+import sys\n from setuptools import setup, find_packages\n \n here = os.path.abspath(os.path.dirname(__file__))\n@@ -21,6 +22,11 @@\n 'jsonschema',\n ]\n \n+if sys.version_info < (3,):\n+ REQUIREMENTS.extend([\n+ 'functools32', # not installed by jsonschema with old pip versions.\n+ ])\n+\n POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n ]\n", "issue": "kinto start fails: no module named functools32\n```\nkinto start\nStarting subprocess with file monitor\n2015-11-29 10:12:24,821 INFO [venusian][MainThread] kinto 1.9.0 starting. \nTraceback (most recent call last):\n File \"/var/www/kinto.leplat.re/venv/bin/kinto\", line 9, in <module>\n load_entry_point('kinto==1.9.0', 'console_scripts', 'kinto')()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__main__.py\", line 55, in main\n pserve.main(pserve_argv)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 60, in main\n return command.run()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 366, in run\n global_conf=vars)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 401, in loadapp\n return loadapp(app_spec, name=name, relative_to=relative_to, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 247, in loadapp\n return loadobj(APP, uri, name=name, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 272, in loadobj\n return context.create()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 710, in create\n return self.object_type.invoke(self)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 146, in invoke\n return fix_call(context.object, context.global_conf, **context.local_conf)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/util.py\", line 55, in fix_call\n val = callable(*args, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__init__.py\", line 77, in main\n config.scan(\"kinto.views\", **kwargs)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/config/__init__.py\", line 974, in scan\n ignore=ignore)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/venusian/__init__.py\", line 205, in scan\n __import__(modname)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/buckets.py\", line 12, in <module>\n from kinto.views.collections import Collection\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/collections.py\", line 2, in <module>\n import jsonschema\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/__init__.py\", line 12, in <module>\n from jsonschema.exceptions import (\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/exceptions.py\", line 6, in <module>\n from jsonschema import _utils\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/_utils.py\", line 6, in <module>\n from jsonschema.compat import str_types, MutableMapping, urlsplit\n File 
\"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/compat.py\", line 39, in <module>\n from functools32 import lru_cache\nImportError: No module named functools32\n```\n\n", "before_files": [{"content": "import os\nimport codecs\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'waitress',\n 'cliquet>=2.13,<3',\n 'jsonschema',\n]\n\nPOSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n]\n\nMONITORING_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[monitoring]>=2.13,<3'\n]\n\nFXA_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet-fxa'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\nDEPENDENCY_LINKS = [\n]\n\nsetup(name='kinto',\n version='1.11.0.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'postgresql': POSTGRESQL_REQUIREMENTS,\n 'monitoring': MONITORING_REQUIREMENTS,\n 'fxa': FXA_REQUIREMENTS,\n },\n entry_points=ENTRY_POINTS,\n dependency_links=DEPENDENCY_LINKS)\n", "path": "setup.py"}]}
| 2,229 | 150 |
gh_patches_debug_21792
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-2546
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Footer widget footer--description css selector not working
Have you checked closed issues? https://github.com/Textualize/textual/issues?q=is%3Aissue+is%3Aclosed
yes
Please give a brief but clear explanation of the issue. If you can, include a complete working example that demonstrates the bug. **Check it can run without modifications.**
This is my first issue submitted and I'm new to Textual, so this could be user error. I'm trying to style a footer widget with CSS. I have successfully used the footer--highlight, footer--highlight-key and footer--key component classes to style the footer, but the footer--description class doesn't seem to do anything and the default CSS from the Footer class is used. Shouldn't footer--description override Footer in my example below? I would expect the "Quit" text to start off with a purple background. Thanks!
```
from textual.app import App, ComposeResult
from textual.widgets import Footer
class App(App):
CSS = """
Footer .footer--description {
background: purple;
}
Footer .footer--highlight {
background: red;
}
Footer .footer--highlight-key {
background: blue;
}
Footer .footer--key {
background: green;
}
"""
BINDINGS = [
("q", "quit", "Quit"),
]
def compose(self) -> ComposeResult:
yield Footer()
if __name__ == "__main__":
app = App()
app.run()
```
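
For comparison, a component class only affects output where the widget's render code fetches and applies it. A toy sketch of that pattern, assuming the Textual 0.24 API (this is not the real Footer implementation):

```python
# Minimal sketch of the component-class pattern, assuming the Textual 0.24 API.
from rich.text import Text
from textual.widget import Widget


class Tag(Widget):
    COMPONENT_CLASSES = {"tag--label"}
    DEFAULT_CSS = "Tag > .tag--label { color: yellow; }"

    def render(self) -> Text:
        # The CSS above (or a user override) only shows up because this line
        # explicitly requests the component style and applies it to the text.
        return Text("label", style=self.get_component_rich_style("tag--label"))
```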
It will be helpful if you run the following command and paste the results:
```
textual diagnose
```
# Textual Diagnostics
## Versions
| Name | Value |
|---------|--------|
| Textual | 0.24.1 |
| Rich | 13.3.5 |
## Python
| Name | Value |
|----------------|--------------------------------------------------------|
| Version | 3.11.0 |
| Implementation | CPython |
| Compiler | Clang 14.0.0 (clang-1400.0.29.102) |
| Executable | /Users/Tom/Projects/Python/PythonDemos/venv/bin/python |
## Operating System
| Name | Value |
|---------|--------------------------------------------------------------------------------------------------|
| System | Darwin |
| Release | 22.4.0 |
| Version | Darwin Kernel Version 22.4.0: Mon Mar 6 21:00:17 PST 2023; root:xnu-8796.101.5~3/RELEASE_X86_64 |
## Terminal
| Name | Value |
|----------------------|-------------|
| Terminal Application | Kitty |
| TERM | xterm-kitty |
| COLORTERM | truecolor |
| FORCE_COLOR | *Not set* |
| NO_COLOR | *Not set* |
## Rich Console options
| Name | Value |
|----------------|----------------------|
| size | width=192, height=45 |
| legacy_windows | False |
| min_width | 1 |
| max_width | 192 |
| is_terminal | True |
| encoding | utf-8 |
| max_height | 45 |
| justify | None |
| overflow | None |
| no_wrap | False |
| highlight | None |
| markup | None |
| height | None |
Feel free to add screenshots and / or videos. These can be very helpful!
</issue>
<code>
[start of src/textual/widgets/_footer.py]
1 from __future__ import annotations
2
3 from collections import defaultdict
4 from typing import ClassVar, Optional
5
6 import rich.repr
7 from rich.console import RenderableType
8 from rich.text import Text
9
10 from .. import events
11 from ..reactive import reactive
12 from ..widget import Widget
13
14
15 @rich.repr.auto
16 class Footer(Widget):
17 """A simple footer widget which docks itself to the bottom of the parent container."""
18
19 COMPONENT_CLASSES: ClassVar[set[str]] = {
20 "footer--description",
21 "footer--key",
22 "footer--highlight",
23 "footer--highlight-key",
24 }
25 """
26 | Class | Description |
27 | :- | :- |
28 | `footer--description` | Targets the descriptions of the key bindings. |
29 | `footer--highlight` | Targets the highlighted key binding. |
30 | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |
31 | `footer--key` | Targets the key portions of the key bindings. |
32 """
33
34 DEFAULT_CSS = """
35 Footer {
36 background: $accent;
37 color: $text;
38 dock: bottom;
39 height: 1;
40 }
41 Footer > .footer--highlight {
42 background: $accent-darken-1;
43 }
44
45 Footer > .footer--highlight-key {
46 background: $secondary;
47 text-style: bold;
48 }
49
50 Footer > .footer--key {
51 text-style: bold;
52 background: $accent-darken-2;
53 }
54 """
55
56 highlight_key: reactive[str | None] = reactive[Optional[str]](None)
57
58 def __init__(self) -> None:
59 super().__init__()
60 self._key_text: Text | None = None
61 self.auto_links = False
62
63 async def watch_highlight_key(self) -> None:
64 """If highlight key changes we need to regenerate the text."""
65 self._key_text = None
66 self.refresh()
67
68 def _on_mount(self, _: events.Mount) -> None:
69 self.watch(self.screen, "focused", self._bindings_changed)
70 self.watch(self.screen, "stack_updates", self._bindings_changed)
71
72 def _bindings_changed(self, _: Widget | None) -> None:
73 self._key_text = None
74 self.refresh()
75
76 def _on_mouse_move(self, event: events.MouseMove) -> None:
77 """Store any key we are moving over."""
78 self.highlight_key = event.style.meta.get("key")
79
80 def _on_leave(self, _: events.Leave) -> None:
81 """Clear any highlight when the mouse leaves the widget"""
82 if self.screen.is_current:
83 self.highlight_key = None
84
85 def __rich_repr__(self) -> rich.repr.Result:
86 yield from super().__rich_repr__()
87
88 def _make_key_text(self) -> Text:
89 """Create text containing all the keys."""
90 base_style = self.rich_style
91 text = Text(
92 style=self.rich_style,
93 no_wrap=True,
94 overflow="ellipsis",
95 justify="left",
96 end="",
97 )
98 highlight_style = self.get_component_rich_style("footer--highlight")
99 highlight_key_style = self.get_component_rich_style("footer--highlight-key")
100 key_style = self.get_component_rich_style("footer--key")
101
102 bindings = [
103 binding
104 for (_, binding) in self.app.namespace_bindings.values()
105 if binding.show
106 ]
107
108 action_to_bindings = defaultdict(list)
109 for binding in bindings:
110 action_to_bindings[binding.action].append(binding)
111
112 for _, bindings in action_to_bindings.items():
113 binding = bindings[0]
114 if binding.key_display is None:
115 key_display = self.app.get_key_display(binding.key)
116 if key_display is None:
117 key_display = binding.key.upper()
118 else:
119 key_display = binding.key_display
120 hovered = self.highlight_key == binding.key
121 key_text = Text.assemble(
122 (f" {key_display} ", highlight_key_style if hovered else key_style),
123 (
124 f" {binding.description} ",
125 highlight_style if hovered else base_style,
126 ),
127 meta={
128 "@click": f"app.check_bindings('{binding.key}')",
129 "key": binding.key,
130 },
131 )
132 text.append_text(key_text)
133 return text
134
135 def notify_style_update(self) -> None:
136 self._key_text = None
137
138 def post_render(self, renderable):
139 return renderable
140
141 def render(self) -> RenderableType:
142 if self._key_text is None:
143 self._key_text = self._make_key_text()
144 return self._key_text
145
[end of src/textual/widgets/_footer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py
--- a/src/textual/widgets/_footer.py
+++ b/src/textual/widgets/_footer.py
@@ -98,6 +98,7 @@
highlight_style = self.get_component_rich_style("footer--highlight")
highlight_key_style = self.get_component_rich_style("footer--highlight-key")
key_style = self.get_component_rich_style("footer--key")
+ description_style = self.get_component_rich_style("footer--description")
bindings = [
binding
@@ -122,7 +123,7 @@
(f" {key_display} ", highlight_key_style if hovered else key_style),
(
f" {binding.description} ",
- highlight_style if hovered else base_style,
+ highlight_style if hovered else base_style + description_style,
),
meta={
"@click": f"app.check_bindings('{binding.key}')",
|
{"golden_diff": "diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py\n--- a/src/textual/widgets/_footer.py\n+++ b/src/textual/widgets/_footer.py\n@@ -98,6 +98,7 @@\n highlight_style = self.get_component_rich_style(\"footer--highlight\")\n highlight_key_style = self.get_component_rich_style(\"footer--highlight-key\")\n key_style = self.get_component_rich_style(\"footer--key\")\n+ description_style = self.get_component_rich_style(\"footer--description\")\n \n bindings = [\n binding\n@@ -122,7 +123,7 @@\n (f\" {key_display} \", highlight_key_style if hovered else key_style),\n (\n f\" {binding.description} \",\n- highlight_style if hovered else base_style,\n+ highlight_style if hovered else base_style + description_style,\n ),\n meta={\n \"@click\": f\"app.check_bindings('{binding.key}')\",\n", "issue": "Footer widget footer--description css selector not working\nHave you checked closed issues? https://github.com/Textualize/textual/issues?q=is%3Aissue+is%3Aclosed\r\nyes\r\n\r\nPlease give a brief but clear explanation of the issue. If you can, include a complete working example that demonstrates the bug. **Check it can run without modifications.**\r\n\r\nThis is my first issue submitted and I'm new to Textual so this could be user error. I'm trying to style a footer widget with CSS. I have successfully used the footer--highlight, footer--highlight-key and footer--key component classes to style the footer, but the footer--description class doesn't seem to do anything and the default css from the Footer class is used. Shouldn't footer--description override Footer in my example below? I would expect the \"Quit\" text to start off with a purple background. Thanks!\r\n\r\n```\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Footer\r\n\r\n\r\nclass App(App):\r\n\r\n CSS = \"\"\"\r\n Footer .footer--description {\r\n background: purple;\r\n }\r\n \r\n Footer .footer--highlight {\r\n background: red;\r\n }\r\n \r\n Footer .footer--highlight-key {\r\n background: blue;\r\n }\r\n \r\n Footer .footer--key {\r\n background: green;\r\n }\r\n \"\"\"\r\n\r\n BINDINGS = [\r\n (\"q\", \"quit\", \"Quit\"),\r\n ]\r\n\r\n def compose(self) -> ComposeResult:\r\n yield Footer()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = App()\r\n app.run()\r\n```\r\n\r\n\r\nIt will be helpful if you run the following command and paste the results:\r\n\r\n```\r\ntextual diagnose\r\n```\r\n\r\n# Textual Diagnostics\r\n\r\n## Versions\r\n\r\n| Name | Value |\r\n|---------|--------|\r\n| Textual | 0.24.1 |\r\n| Rich | 13.3.5 |\r\n\r\n## Python\r\n\r\n| Name | Value |\r\n|----------------|--------------------------------------------------------|\r\n| Version | 3.11.0 |\r\n| Implementation | CPython |\r\n| Compiler | Clang 14.0.0 (clang-1400.0.29.102) |\r\n| Executable | /Users/Tom/Projects/Python/PythonDemos/venv/bin/python |\r\n\r\n## Operating System\r\n\r\n| Name | Value |\r\n|---------|--------------------------------------------------------------------------------------------------|\r\n| System | Darwin |\r\n| Release | 22.4.0 |\r\n| Version | Darwin Kernel Version 22.4.0: Mon Mar 6 21:00:17 PST 2023; root:xnu-8796.101.5~3/RELEASE_X86_64 |\r\n\r\n## Terminal\r\n\r\n| Name | Value |\r\n|----------------------|-------------|\r\n| Terminal Application | Kitty |\r\n| TERM | xterm-kitty |\r\n| COLORTERM | truecolor |\r\n| FORCE_COLOR | *Not set* |\r\n| NO_COLOR | *Not set* |\r\n\r\n## Rich Console options\r\n\r\n| Name | Value 
|\r\n|----------------|----------------------|\r\n| size | width=192, height=45 |\r\n| legacy_windows | False |\r\n| min_width | 1 |\r\n| max_width | 192 |\r\n| is_terminal | True |\r\n| encoding | utf-8 |\r\n| max_height | 45 |\r\n| justify | None |\r\n| overflow | None |\r\n| no_wrap | False |\r\n| highlight | None |\r\n| markup | None |\r\n| height | None |\r\n\r\n\r\nFeel free to add screenshots and / or videos. These can be very helpful!\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import ClassVar, Optional\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n\[email protected]\nclass Footer(Widget):\n \"\"\"A simple footer widget which docks itself to the bottom of the parent container.\"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"footer--description\",\n \"footer--key\",\n \"footer--highlight\",\n \"footer--highlight-key\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `footer--description` | Targets the descriptions of the key bindings. |\n | `footer--highlight` | Targets the highlighted key binding. |\n | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |\n | `footer--key` | Targets the key portions of the key bindings. |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Footer {\n background: $accent;\n color: $text;\n dock: bottom;\n height: 1;\n }\n Footer > .footer--highlight {\n background: $accent-darken-1;\n }\n\n Footer > .footer--highlight-key {\n background: $secondary;\n text-style: bold;\n }\n\n Footer > .footer--key {\n text-style: bold;\n background: $accent-darken-2;\n }\n \"\"\"\n\n highlight_key: reactive[str | None] = reactive[Optional[str]](None)\n\n def __init__(self) -> None:\n super().__init__()\n self._key_text: Text | None = None\n self.auto_links = False\n\n async def watch_highlight_key(self) -> None:\n \"\"\"If highlight key changes we need to regenerate the text.\"\"\"\n self._key_text = None\n self.refresh()\n\n def _on_mount(self, _: events.Mount) -> None:\n self.watch(self.screen, \"focused\", self._bindings_changed)\n self.watch(self.screen, \"stack_updates\", self._bindings_changed)\n\n def _bindings_changed(self, _: Widget | None) -> None:\n self._key_text = None\n self.refresh()\n\n def _on_mouse_move(self, event: events.MouseMove) -> None:\n \"\"\"Store any key we are moving over.\"\"\"\n self.highlight_key = event.style.meta.get(\"key\")\n\n def _on_leave(self, _: events.Leave) -> None:\n \"\"\"Clear any highlight when the mouse leaves the widget\"\"\"\n if self.screen.is_current:\n self.highlight_key = None\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n\n def _make_key_text(self) -> Text:\n \"\"\"Create text containing all the keys.\"\"\"\n base_style = self.rich_style\n text = Text(\n style=self.rich_style,\n no_wrap=True,\n overflow=\"ellipsis\",\n justify=\"left\",\n end=\"\",\n )\n highlight_style = self.get_component_rich_style(\"footer--highlight\")\n highlight_key_style = self.get_component_rich_style(\"footer--highlight-key\")\n key_style = self.get_component_rich_style(\"footer--key\")\n\n bindings = [\n binding\n for (_, binding) in self.app.namespace_bindings.values()\n if binding.show\n ]\n\n action_to_bindings = defaultdict(list)\n for binding in bindings:\n action_to_bindings[binding.action].append(binding)\n\n for _, bindings in 
action_to_bindings.items():\n binding = bindings[0]\n if binding.key_display is None:\n key_display = self.app.get_key_display(binding.key)\n if key_display is None:\n key_display = binding.key.upper()\n else:\n key_display = binding.key_display\n hovered = self.highlight_key == binding.key\n key_text = Text.assemble(\n (f\" {key_display} \", highlight_key_style if hovered else key_style),\n (\n f\" {binding.description} \",\n highlight_style if hovered else base_style,\n ),\n meta={\n \"@click\": f\"app.check_bindings('{binding.key}')\",\n \"key\": binding.key,\n },\n )\n text.append_text(key_text)\n return text\n\n def notify_style_update(self) -> None:\n self._key_text = None\n\n def post_render(self, renderable):\n return renderable\n\n def render(self) -> RenderableType:\n if self._key_text is None:\n self._key_text = self._make_key_text()\n return self._key_text\n", "path": "src/textual/widgets/_footer.py"}]}
| 2,655 | 207 |
gh_patches_debug_10904
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-1174
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Filtering for POIs in event list not working
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
When filters for POIs in the event list are applied, all events are returned, regardless of their location
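
For reference, the expected behaviour corresponds to the location filter narrowing the event queryset. An illustrative sketch follows; the model and field names are assumptions, not the actual filter form code:

```python
# Illustrative only - "location" is an assumed field name on Event.
from django.db.models import QuerySet


def filter_events_by_location(events: QuerySet, poi) -> QuerySet:
    """Return only the events that take place at the given POI."""
    if poi is None:
        return events  # no location selected, keep the full list
    return events.filter(location=poi)
```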
### Steps to Reproduce
1. Go to event list
2. Filter for event location
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
Only the events with the selected location should be shown
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
All events are shown
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->
</issue>
<code>
[start of integreat_cms/cms/views/events/event_form_view.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.core.exceptions import PermissionDenied
6 from django.shortcuts import render, redirect
7 from django.utils.decorators import method_decorator
8 from django.utils.translation import ugettext as _
9 from django.views.generic import TemplateView
10
11
12 from ...constants import status, translation_status
13 from ...decorators import permission_required
14 from ...forms import EventForm, EventTranslationForm, RecurrenceRuleForm
15 from ...models import Language, Event, EventTranslation, RecurrenceRule, POI
16 from .event_context_mixin import EventContextMixin
17 from ..media.media_context_mixin import MediaContextMixin
18
19
20 logger = logging.getLogger(__name__)
21
22
23 @method_decorator(permission_required("cms.view_event"), name="dispatch")
24 @method_decorator(permission_required("cms.change_event"), name="post")
25 class EventFormView(TemplateView, EventContextMixin, MediaContextMixin):
26 """
27 Class for rendering the events form
28 """
29
30 #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)
31 template_name = "events/event_form.html"
32 #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)
33 extra_context = {
34 "current_menu_item": "events_form",
35 "translation_status": translation_status,
36 }
37
38 # pylint: disable=too-many-locals
39 def get(self, request, *args, **kwargs):
40 r"""
41 Render event form for HTTP GET requests
42
43 :param request: Object representing the user call
44 :type request: ~django.http.HttpRequest
45
46 :param \*args: The supplied arguments
47 :type \*args: list
48
49 :param \**kwargs: The supplied keyword arguments
50 :type \**kwargs: dict
51
52 :return: The rendered template response
53 :rtype: ~django.template.response.TemplateResponse
54 """
55 region = request.region
56 language = region.get_language_or_404(
57 kwargs.get("language_slug"), only_active=True
58 )
59
60 # get event and event translation objects if they exist, otherwise objects are None
61 event_instance = region.events.filter(id=kwargs.get("event_id")).first()
62 event_translation_instance = language.event_translations.filter(
63 event=event_instance
64 ).first()
65 recurrence_rule_instance = RecurrenceRule.objects.filter(
66 event=event_instance
67 ).first()
68 poi_instance = region.pois.filter(events=event_instance).first()
69
70 # Make form disabled if event is archived or user doesn't have the permission to edit the event
71 if event_instance and event_instance.archived:
72 disabled = True
73 messages.warning(
74 request, _("You cannot edit this event because it is archived.")
75 )
76 elif not request.user.has_perm("cms.change_event"):
77 disabled = True
78 messages.warning(
79 request, _("You don't have the permission to edit events.")
80 )
81 else:
82 disabled = False
83
84 if not request.user.has_perm("cms.publish_event"):
85 messages.warning(
86 request,
87 _(
88 "You don't have the permission to publish events, but you can propose changes and submit them for review instead."
89 ),
90 )
91
92 event_form = EventForm(instance=event_instance, disabled=disabled)
93 event_translation_form = EventTranslationForm(
94 instance=event_translation_instance, disabled=disabled
95 )
96 recurrence_rule_form = RecurrenceRuleForm(
97 instance=recurrence_rule_instance, disabled=disabled
98 )
99 url_link = f"{settings.WEBAPP_URL}/{region.slug}/{language.slug}/events/"
100 return render(
101 request,
102 self.template_name,
103 {
104 **self.get_context_data(**kwargs),
105 "event_form": event_form,
106 "event_translation_form": event_translation_form,
107 "recurrence_rule_form": recurrence_rule_form,
108 "poi": poi_instance,
109 "language": language,
110 "languages": region.active_languages if event_instance else [language],
111 "url_link": url_link,
112 "translation_states": event_instance.translation_states
113 if event_instance
114 else [],
115 },
116 )
117
118 # pylint: disable=too-many-locals,too-many-branches
119 def post(self, request, **kwargs):
120 r"""
121 Save event and ender event form for HTTP POST requests
122
123 :param request: Object representing the user call
124 :type request: ~django.http.HttpRequest
125
126 :param \**kwargs: The supplied keyword arguments
127 :type \**kwargs: dict
128
129 :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to publish events
130
131 :return: The rendered template response
132 :rtype: ~django.template.response.TemplateResponse
133 """
134 region = request.region
135 language = Language.objects.get(slug=kwargs.get("language_slug"))
136 poi = POI.objects.filter(id=request.POST.get("location")).first()
137
138 event_instance = Event.objects.filter(id=kwargs.get("event_id")).first()
139 recurrence_rule_instance = RecurrenceRule.objects.filter(
140 event=event_instance
141 ).first()
142 event_translation_instance = EventTranslation.objects.filter(
143 event=event_instance, language=language
144 ).first()
145
146 event_form = EventForm(
147 data=request.POST,
148 files=request.FILES,
149 instance=event_instance,
150 additional_instance_attributes={"region": region, "location": poi},
151 )
152 # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation
153 event_form_valid = event_form.is_valid()
154 recurrence_rule_form = RecurrenceRuleForm(
155 data=request.POST,
156 instance=recurrence_rule_instance,
157 event_start_date=event_form.cleaned_data.get("start_date", None),
158 )
159 event_translation_form = EventTranslationForm(
160 data=request.POST,
161 instance=event_translation_instance,
162 additional_instance_attributes={
163 "creator": request.user,
164 "language": language,
165 "event": event_form.instance,
166 },
167 )
168
169 if (
170 not event_form_valid
171 or not event_translation_form.is_valid()
172 or (
173 event_form.cleaned_data["is_recurring"]
174 and not recurrence_rule_form.is_valid()
175 )
176 ):
177 # Add error messages
178 event_form.add_error_messages(request)
179 event_translation_form.add_error_messages(request)
180 # do not call recurrence rule form clean method when recurrence rule is not set
181 if event_form.cleaned_data["is_recurring"]:
182 recurrence_rule_form.add_error_messages(request)
183 elif (
184 event_translation_form.instance.status == status.AUTO_SAVE
185 and not event_form.has_changed()
186 and not event_translation_form.has_changed()
187 and not recurrence_rule_form.has_changed()
188 ):
189 messages.info(request, _("No changes detected, autosave skipped"))
190 else:
191 # Check publish permissions
192 if event_translation_form.instance.status == status.PUBLIC:
193 if not request.user.has_perm("cms.publish_event"):
194 raise PermissionDenied(
195 f"{request.user!r} does not have the permission 'cms.publish_event'"
196 )
197 # Save forms
198 if event_form.cleaned_data.get("is_recurring"):
199 # If event is recurring, save recurrence rule
200 event_form.instance.recurrence_rule = recurrence_rule_form.save()
201 elif event_form.instance.recurrence_rule:
202 # If the event is not recurring but it was before, delete the associated recurrence rule
203 event_form.instance.recurrence_rule.delete()
204 event_form.instance.recurrence_rule = None
205 event_translation_form.instance.event = event_form.save()
206 event_translation_form.save()
207 # Add the success message and redirect to the edit page
208 if not event_instance:
209 messages.success(
210 request,
211 _('Event "{}" was successfully created').format(
212 event_translation_form.instance
213 ),
214 )
215 elif (
216 not event_form.has_changed()
217 and not event_translation_form.has_changed()
218 and not recurrence_rule_form.has_changed()
219 ):
220 messages.info(request, _("No changes detected, but date refreshed"))
221 else:
222 # Add the success message
223 event_translation_form.add_success_message(request)
224 return redirect(
225 "edit_event",
226 **{
227 "event_id": event_form.instance.id,
228 "region_slug": region.slug,
229 "language_slug": language.slug,
230 },
231 )
232
233 return render(
234 request,
235 self.template_name,
236 {
237 **self.get_context_data(**kwargs),
238 "event_form": event_form,
239 "event_translation_form": event_translation_form,
240 "recurrence_rule_form": recurrence_rule_form,
241 "poi": poi,
242 "language": language,
243 "languages": region.active_languages if event_instance else [language],
244 "translation_states": event_instance.translation_states
245 if event_instance
246 else [],
247 },
248 )
249
[end of integreat_cms/cms/views/events/event_form_view.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/integreat_cms/cms/views/events/event_form_view.py b/integreat_cms/cms/views/events/event_form_view.py
--- a/integreat_cms/cms/views/events/event_form_view.py
+++ b/integreat_cms/cms/views/events/event_form_view.py
@@ -65,7 +65,10 @@
recurrence_rule_instance = RecurrenceRule.objects.filter(
event=event_instance
).first()
- poi_instance = region.pois.filter(events=event_instance).first()
+ if event_instance:
+ poi_instance = event_instance.location
+ else:
+ poi_instance = None
# Make form disabled if event is archived or user doesn't have the permission to edit the event
if event_instance and event_instance.archived:
|
{"golden_diff": "diff --git a/integreat_cms/cms/views/events/event_form_view.py b/integreat_cms/cms/views/events/event_form_view.py\n--- a/integreat_cms/cms/views/events/event_form_view.py\n+++ b/integreat_cms/cms/views/events/event_form_view.py\n@@ -65,7 +65,10 @@\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n- poi_instance = region.pois.filter(events=event_instance).first()\n+ if event_instance:\n+ poi_instance = event_instance.location\n+ else:\n+ poi_instance = None\n \n # Make form disabled if event is archived or user doesn't have the permission to edit the event\n if event_instance and event_instance.archived:\n", "issue": "Filtering for POIs in event list not working\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen filters for POIs in the event list are applied, all events are returned, independent from their location\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to event list\r\n2. Filter for event location\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nOnly the events with the selected location should be shown\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nAll events are shown\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\n\nfrom ...constants import status, translation_status\nfrom ...decorators import permission_required\nfrom ...forms import EventForm, EventTranslationForm, RecurrenceRuleForm\nfrom ...models import Language, Event, EventTranslation, RecurrenceRule, POI\nfrom .event_context_mixin import EventContextMixin\nfrom ..media.media_context_mixin import MediaContextMixin\n\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_event\"), name=\"dispatch\")\n@method_decorator(permission_required(\"cms.change_event\"), name=\"post\")\nclass EventFormView(TemplateView, EventContextMixin, MediaContextMixin):\n \"\"\"\n Class for rendering the events form\n \"\"\"\n\n #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n template_name = \"events/event_form.html\"\n #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)\n extra_context = {\n \"current_menu_item\": \"events_form\",\n \"translation_status\": translation_status,\n }\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Render event form for HTTP GET requests\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n region = request.region\n language = region.get_language_or_404(\n kwargs.get(\"language_slug\"), only_active=True\n )\n\n # get event and event translation objects if they exist, otherwise objects are 
None\n event_instance = region.events.filter(id=kwargs.get(\"event_id\")).first()\n event_translation_instance = language.event_translations.filter(\n event=event_instance\n ).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n poi_instance = region.pois.filter(events=event_instance).first()\n\n # Make form disabled if event is archived or user doesn't have the permission to edit the event\n if event_instance and event_instance.archived:\n disabled = True\n messages.warning(\n request, _(\"You cannot edit this event because it is archived.\")\n )\n elif not request.user.has_perm(\"cms.change_event\"):\n disabled = True\n messages.warning(\n request, _(\"You don't have the permission to edit events.\")\n )\n else:\n disabled = False\n\n if not request.user.has_perm(\"cms.publish_event\"):\n messages.warning(\n request,\n _(\n \"You don't have the permission to publish events, but you can propose changes and submit them for review instead.\"\n ),\n )\n\n event_form = EventForm(instance=event_instance, disabled=disabled)\n event_translation_form = EventTranslationForm(\n instance=event_translation_instance, disabled=disabled\n )\n recurrence_rule_form = RecurrenceRuleForm(\n instance=recurrence_rule_instance, disabled=disabled\n )\n url_link = f\"{settings.WEBAPP_URL}/{region.slug}/{language.slug}/events/\"\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi_instance,\n \"language\": language,\n \"languages\": region.active_languages if event_instance else [language],\n \"url_link\": url_link,\n \"translation_states\": event_instance.translation_states\n if event_instance\n else [],\n },\n )\n\n # pylint: disable=too-many-locals,too-many-branches\n def post(self, request, **kwargs):\n r\"\"\"\n Save event and ender event form for HTTP POST requests\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to publish events\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n region = request.region\n language = Language.objects.get(slug=kwargs.get(\"language_slug\"))\n poi = POI.objects.filter(id=request.POST.get(\"location\")).first()\n\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n\n event_form = EventForm(\n data=request.POST,\n files=request.FILES,\n instance=event_instance,\n additional_instance_attributes={\"region\": region, \"location\": poi},\n )\n # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation\n event_form_valid = event_form.is_valid()\n recurrence_rule_form = RecurrenceRuleForm(\n data=request.POST,\n instance=recurrence_rule_instance,\n event_start_date=event_form.cleaned_data.get(\"start_date\", None),\n )\n event_translation_form = EventTranslationForm(\n data=request.POST,\n instance=event_translation_instance,\n additional_instance_attributes={\n \"creator\": request.user,\n 
\"language\": language,\n \"event\": event_form.instance,\n },\n )\n\n if (\n not event_form_valid\n or not event_translation_form.is_valid()\n or (\n event_form.cleaned_data[\"is_recurring\"]\n and not recurrence_rule_form.is_valid()\n )\n ):\n # Add error messages\n event_form.add_error_messages(request)\n event_translation_form.add_error_messages(request)\n # do not call recurrence rule form clean method when recurrence rule is not set\n if event_form.cleaned_data[\"is_recurring\"]:\n recurrence_rule_form.add_error_messages(request)\n elif (\n event_translation_form.instance.status == status.AUTO_SAVE\n and not event_form.has_changed()\n and not event_translation_form.has_changed()\n and not recurrence_rule_form.has_changed()\n ):\n messages.info(request, _(\"No changes detected, autosave skipped\"))\n else:\n # Check publish permissions\n if event_translation_form.instance.status == status.PUBLIC:\n if not request.user.has_perm(\"cms.publish_event\"):\n raise PermissionDenied(\n f\"{request.user!r} does not have the permission 'cms.publish_event'\"\n )\n # Save forms\n if event_form.cleaned_data.get(\"is_recurring\"):\n # If event is recurring, save recurrence rule\n event_form.instance.recurrence_rule = recurrence_rule_form.save()\n elif event_form.instance.recurrence_rule:\n # If the event is not recurring but it was before, delete the associated recurrence rule\n event_form.instance.recurrence_rule.delete()\n event_form.instance.recurrence_rule = None\n event_translation_form.instance.event = event_form.save()\n event_translation_form.save()\n # Add the success message and redirect to the edit page\n if not event_instance:\n messages.success(\n request,\n _('Event \"{}\" was successfully created').format(\n event_translation_form.instance\n ),\n )\n elif (\n not event_form.has_changed()\n and not event_translation_form.has_changed()\n and not recurrence_rule_form.has_changed()\n ):\n messages.info(request, _(\"No changes detected, but date refreshed\"))\n else:\n # Add the success message\n event_translation_form.add_success_message(request)\n return redirect(\n \"edit_event\",\n **{\n \"event_id\": event_form.instance.id,\n \"region_slug\": region.slug,\n \"language_slug\": language.slug,\n },\n )\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi,\n \"language\": language,\n \"languages\": region.active_languages if event_instance else [language],\n \"translation_states\": event_instance.translation_states\n if event_instance\n else [],\n },\n )\n", "path": "integreat_cms/cms/views/events/event_form_view.py"}]}
| 3,166 | 165 |
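The golden diff above replaces the reverse lookup `region.pois.filter(events=event_instance).first()` with a direct read of the event's own location relation. A minimal sketch of that corrected lookup, assuming `Event` exposes a nullable `location` foreign key to `POI` as the patch implies (not a drop-in replacement for the view):

```python
# Sketch of the lookup pattern used by the golden diff above.
# Instead of querying region.pois.filter(events=event_instance).first(),
# the fixed view reads the event instance's own location relation.
def get_event_location(event_instance):
    # Assumption: Event has a nullable `location` foreign key to POI.
    return event_instance.location if event_instance else None
```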
gh_patches_debug_43103 | rasdani/github-patches | git_diff | crytic__slither-438 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Human-summary printer: show number of lines of assemly code
The human summary printer should show if a contract uses inline assembly, and if so, shows the number of lines of assembly code
</issue>
<code>
[start of slither/printers/summary/human_summary.py]
1 """
2 Module printing summary of the contract
3 """
4 import logging
5
6 from slither.printers.abstract_printer import AbstractPrinter
7 from slither.utils import output
8 from slither.utils.code_complexity import compute_cyclomatic_complexity
9 from slither.utils.colors import green, red, yellow
10 from slither.utils.standard_libraries import is_standard_library
11
12 class PrinterHumanSummary(AbstractPrinter):
13 ARGUMENT = 'human-summary'
14 HELP = 'Print a human-readable summary of the contracts'
15
16 WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#human-summary'
17
18 @staticmethod
19 def _get_summary_erc20(contract):
20
21 functions_name = [f.name for f in contract.functions]
22 state_variables = [v.name for v in contract.state_variables]
23
24 pause = 'pause' in functions_name
25
26 if 'mint' in functions_name:
27 if not 'mintingFinished' in state_variables:
28 mint_limited = False
29 else:
30 mint_limited = True
31 else:
32 mint_limited = None # no minting
33
34 race_condition_mitigated = 'increaseApproval' in functions_name or\
35 'safeIncreaseAllowance' in functions_name
36
37 return pause, mint_limited, race_condition_mitigated
38
39
40 def get_summary_erc20(self, contract):
41 txt = ''
42
43 pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)
44
45 if pause:
46 txt += "\t\t Can be paused? : {}\n".format(yellow('Yes'))
47 else:
48 txt += "\t\t Can be paused? : {}\n".format(green('No'))
49
50 if mint_limited is None:
51 txt += "\t\t Minting restriction? : {}\n".format(green('No Minting'))
52 else:
53 if mint_limited:
54 txt += "\t\t Minting restriction? : {}\n".format(red('Yes'))
55 else:
56 txt += "\t\t Minting restriction? : {}\n".format(yellow('No'))
57
58 if race_condition_mitigated:
59 txt += "\t\t ERC20 race condition mitigation: {}\n".format(green('Yes'))
60 else:
61 txt += "\t\t ERC20 race condition mitigation: {}\n".format(red('No'))
62
63 return txt
64
65 def _get_detectors_result(self):
66 # disable detectors logger
67 logger = logging.getLogger('Detectors')
68 logger.setLevel(logging.ERROR)
69
70 checks_optimization = self.slither.detectors_optimization
71 checks_informational = self.slither.detectors_informational
72 checks_low = self.slither.detectors_low
73 checks_medium = self.slither.detectors_medium
74 checks_high = self.slither.detectors_high
75
76 issues_optimization = [c.detect() for c in checks_optimization]
77 issues_optimization = [c for c in issues_optimization if c]
78 issues_optimization = [item for sublist in issues_optimization for item in sublist]
79
80 issues_informational = [c.detect() for c in checks_informational]
81 issues_informational = [c for c in issues_informational if c]
82 issues_informational = [item for sublist in issues_informational for item in sublist]
83
84 issues_low = [c.detect() for c in checks_low]
85 issues_low = [c for c in issues_low if c]
86 issues_low = [item for sublist in issues_low for item in sublist]
87
88 issues_medium = (c.detect() for c in checks_medium)
89 issues_medium = [c for c in issues_medium if c]
90 issues_medium = [item for sublist in issues_medium for item in sublist]
91
92 issues_high = [c.detect() for c in checks_high]
93 issues_high = [c for c in issues_high if c]
94 issues_high = [item for sublist in issues_high for item in sublist]
95
96
97
98 return (len(issues_optimization),
99 len(issues_informational),
100 len(issues_low),
101 len(issues_medium),
102 len(issues_high))
103
104 def get_detectors_result(self):
105 issues_optimization, issues_informational, issues_low, issues_medium, issues_high = self._get_detectors_result()
106 txt = "Number of optimization issues: {}\n".format(green(issues_optimization))
107 txt += "Number of informational issues: {}\n".format(green(issues_informational))
108 txt += "Number of low issues: {}\n".format(green(issues_low))
109 if issues_medium > 0:
110 txt += "Number of medium issues: {}\n".format(yellow(issues_medium))
111 else:
112 txt += "Number of medium issues: {}\n".format(green(issues_medium))
113 if issues_high > 0:
114 txt += "Number of high issues: {}\n".format(red(issues_high))
115 else:
116 txt += "Number of high issues: {}\n\n".format(green(issues_high))
117
118 return txt
119
120 @staticmethod
121 def _is_complex_code(contract):
122 for f in contract.functions:
123 if compute_cyclomatic_complexity(f) > 7:
124 return True
125 return False
126
127 def is_complex_code(self, contract):
128 """
129 Check if the code is complex
130 Heuristic, the code is complex if:
131 - One function has a cyclomatic complexity > 7
132 Args:
133 contract
134 """
135
136 is_complex = self._is_complex_code(contract)
137
138 result = red('Yes') if is_complex else green('No')
139
140 return "\tComplex code? {}\n".format(result)
141
142 @staticmethod
143 def _number_functions(contract):
144 return len(contract.functions)
145
146 def _lines_number(self):
147 if not self.slither.source_code:
148 return None
149 total_dep_lines = 0
150 total_lines = 0
151 for filename, source_code in self.slither.source_code.items():
152 lines = len(source_code.splitlines())
153 is_dep = False
154 if self.slither.crytic_compile:
155 is_dep = self.slither.crytic_compile.is_dependency(filename)
156 if is_dep:
157 total_dep_lines += lines
158 else:
159 total_lines += lines
160 return total_lines, total_dep_lines
161
162 def _compilation_type(self):
163 if self.slither.crytic_compile is None:
164 return 'Compilation non standard\n'
165 return f'Compiled with {self.slither.crytic_compile.type}\n'
166
167 def _number_contracts(self):
168 if self.slither.crytic_compile is None:
169 len(self.slither.contracts), 0
170 deps = [c for c in self.slither.contracts if c.is_from_dependency()]
171 contracts = [c for c in self.slither.contracts if not c.is_from_dependency()]
172 return len(contracts), len(deps)
173
174 def _standard_libraries(self):
175 libraries = []
176 for contract in self.contracts:
177 lib = is_standard_library(contract)
178 if lib:
179 libraries.append(lib)
180
181 return libraries
182
183 def _ercs(self):
184 ercs = []
185 for contract in self.contracts:
186 ercs += contract.ercs()
187 return list(set(ercs))
188
189 def output(self, _filename):
190 """
191 _filename is not used
192 Args:
193 _filename(string)
194 """
195
196 txt = "\n"
197 txt += self._compilation_type()
198
199 results = {
200 'contracts': {
201 "elements": []
202 },
203 'number_lines': 0,
204 'number_lines_in_dependencies': 0,
205 'standard_libraries': [],
206 'ercs': [],
207 }
208
209
210 lines_number = self._lines_number()
211 if lines_number:
212 total_lines, total_dep_lines = lines_number
213 txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\n'
214 results['number_lines'] = total_lines
215 results['number_lines__dependencies'] = total_dep_lines
216
217 number_contracts, number_contracts_deps = self._number_contracts()
218 txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \n\n'
219
220 txt += self.get_detectors_result()
221
222 libs = self._standard_libraries()
223 if libs:
224 txt += f'\nUse: {", ".join(libs)}\n'
225 results['standard_libraries'] = [str(l) for l in libs]
226
227 ercs = self._ercs()
228 if ercs:
229 txt += f'ERCs: {", ".join(ercs)}\n'
230 results['ercs'] = [str(e) for e in ercs]
231
232 for contract in self.slither.contracts_derived:
233 txt += "\nContract {}\n".format(contract.name)
234 txt += self.is_complex_code(contract)
235 txt += '\tNumber of functions: {}\n'.format(self._number_functions(contract))
236 ercs = contract.ercs()
237 if ercs:
238 txt += '\tERCs: ' + ','.join(ercs) + '\n'
239 is_erc20 = contract.is_erc20()
240 if is_erc20:
241 txt += '\tERC20 info:\n'
242 txt += self.get_summary_erc20(contract)
243
244 self.info(txt)
245
246 results_contract = output.Output('')
247 for contract in self.slither.contracts_derived:
248 optimization, info, low, medium, high = self._get_detectors_result()
249 contract_d = {'contract_name': contract.name,
250 'is_complex_code': self._is_complex_code(contract),
251 'optimization_issues': optimization,
252 'informational_issues': info,
253 'low_issues': low,
254 'medium_issues': medium,
255 'high_issues': high,
256 'is_erc20': contract.is_erc20(),
257 'number_functions': self._number_functions(contract)}
258 if contract_d['is_erc20']:
259 pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)
260 contract_d['erc20_pause'] = pause
261 if mint_limited is not None:
262 contract_d['erc20_can_mint'] = True
263 contract_d['erc20_mint_limited'] = mint_limited
264 else:
265 contract_d['erc20_can_mint'] = False
266 contract_d['erc20_race_condition_mitigated'] = race_condition_mitigated
267
268 results_contract.add_contract(contract, additional_fields=contract_d)
269
270 results['contracts']['elements'] = results_contract.elements
271
272 json = self.generate_output(txt, additional_fields=results)
273
274 return json
275
276
[end of slither/printers/summary/human_summary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/slither/printers/summary/human_summary.py b/slither/printers/summary/human_summary.py
--- a/slither/printers/summary/human_summary.py
+++ b/slither/printers/summary/human_summary.py
@@ -8,6 +8,8 @@
from slither.utils.code_complexity import compute_cyclomatic_complexity
from slither.utils.colors import green, red, yellow
from slither.utils.standard_libraries import is_standard_library
+from slither.core.cfg.node import NodeType
+
class PrinterHumanSummary(AbstractPrinter):
ARGUMENT = 'human-summary'
@@ -29,14 +31,13 @@
else:
mint_limited = True
else:
- mint_limited = None # no minting
+ mint_limited = None # no minting
- race_condition_mitigated = 'increaseApproval' in functions_name or\
+ race_condition_mitigated = 'increaseApproval' in functions_name or \
'safeIncreaseAllowance' in functions_name
return pause, mint_limited, race_condition_mitigated
-
def get_summary_erc20(self, contract):
txt = ''
@@ -93,8 +94,6 @@
issues_high = [c for c in issues_high if c]
issues_high = [item for sublist in issues_high for item in sublist]
-
-
return (len(issues_optimization),
len(issues_informational),
len(issues_low),
@@ -159,6 +158,17 @@
total_lines += lines
return total_lines, total_dep_lines
+ def _get_number_of_assembly_lines(self):
+ total_asm_lines = 0
+ for contract in self.contracts:
+ for function in contract.functions_declared:
+ for node in function.nodes:
+ if node.type == NodeType.ASSEMBLY:
+ inline_asm = node.inline_asm
+ if inline_asm:
+ total_asm_lines += len(inline_asm.splitlines())
+ return total_asm_lines
+
def _compilation_type(self):
if self.slither.crytic_compile is None:
return 'Compilation non standard\n'
@@ -202,17 +212,20 @@
},
'number_lines': 0,
'number_lines_in_dependencies': 0,
+ 'number_lines_assembly': 0,
'standard_libraries': [],
'ercs': [],
}
-
lines_number = self._lines_number()
if lines_number:
total_lines, total_dep_lines = lines_number
txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\n'
results['number_lines'] = total_lines
results['number_lines__dependencies'] = total_dep_lines
+ total_asm_lines = self._get_number_of_assembly_lines()
+ txt += f"Number of assembly lines: {total_asm_lines}\n"
+ results['number_lines_assembly'] = total_asm_lines
number_contracts, number_contracts_deps = self._number_contracts()
txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \n\n'
@@ -272,4 +285,3 @@
json = self.generate_output(txt, additional_fields=results)
return json
-
|
{"golden_diff": "diff --git a/slither/printers/summary/human_summary.py b/slither/printers/summary/human_summary.py\n--- a/slither/printers/summary/human_summary.py\n+++ b/slither/printers/summary/human_summary.py\n@@ -8,6 +8,8 @@\n from slither.utils.code_complexity import compute_cyclomatic_complexity\n from slither.utils.colors import green, red, yellow\n from slither.utils.standard_libraries import is_standard_library\n+from slither.core.cfg.node import NodeType\n+\n \n class PrinterHumanSummary(AbstractPrinter):\n ARGUMENT = 'human-summary'\n@@ -29,14 +31,13 @@\n else:\n mint_limited = True\n else:\n- mint_limited = None # no minting\n+ mint_limited = None # no minting\n \n- race_condition_mitigated = 'increaseApproval' in functions_name or\\\n+ race_condition_mitigated = 'increaseApproval' in functions_name or \\\n 'safeIncreaseAllowance' in functions_name\n \n return pause, mint_limited, race_condition_mitigated\n \n-\n def get_summary_erc20(self, contract):\n txt = ''\n \n@@ -93,8 +94,6 @@\n issues_high = [c for c in issues_high if c]\n issues_high = [item for sublist in issues_high for item in sublist]\n \n-\n-\n return (len(issues_optimization),\n len(issues_informational),\n len(issues_low),\n@@ -159,6 +158,17 @@\n total_lines += lines\n return total_lines, total_dep_lines\n \n+ def _get_number_of_assembly_lines(self):\n+ total_asm_lines = 0\n+ for contract in self.contracts:\n+ for function in contract.functions_declared:\n+ for node in function.nodes:\n+ if node.type == NodeType.ASSEMBLY:\n+ inline_asm = node.inline_asm\n+ if inline_asm:\n+ total_asm_lines += len(inline_asm.splitlines())\n+ return total_asm_lines\n+\n def _compilation_type(self):\n if self.slither.crytic_compile is None:\n return 'Compilation non standard\\n'\n@@ -202,17 +212,20 @@\n },\n 'number_lines': 0,\n 'number_lines_in_dependencies': 0,\n+ 'number_lines_assembly': 0,\n 'standard_libraries': [],\n 'ercs': [],\n }\n \n-\n lines_number = self._lines_number()\n if lines_number:\n total_lines, total_dep_lines = lines_number\n txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\\n'\n results['number_lines'] = total_lines\n results['number_lines__dependencies'] = total_dep_lines\n+ total_asm_lines = self._get_number_of_assembly_lines()\n+ txt += f\"Number of assembly lines: {total_asm_lines}\\n\"\n+ results['number_lines_assembly'] = total_asm_lines\n \n number_contracts, number_contracts_deps = self._number_contracts()\n txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \\n\\n'\n@@ -272,4 +285,3 @@\n json = self.generate_output(txt, additional_fields=results)\n \n return json\n-\n", "issue": "Human-summary printer: show number of lines of assemly code\nThe human summary printer should show if a contract uses inline assembly, and if so, shows the number of lines of assembly code\n", "before_files": [{"content": "\"\"\"\nModule printing summary of the contract\n\"\"\"\nimport logging\n\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.utils import output\nfrom slither.utils.code_complexity import compute_cyclomatic_complexity\nfrom slither.utils.colors import green, red, yellow\nfrom slither.utils.standard_libraries import is_standard_library\n\nclass PrinterHumanSummary(AbstractPrinter):\n ARGUMENT = 'human-summary'\n HELP = 'Print a human-readable summary of the contracts'\n\n WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#human-summary'\n\n @staticmethod\n def 
_get_summary_erc20(contract):\n\n functions_name = [f.name for f in contract.functions]\n state_variables = [v.name for v in contract.state_variables]\n\n pause = 'pause' in functions_name\n\n if 'mint' in functions_name:\n if not 'mintingFinished' in state_variables:\n mint_limited = False\n else:\n mint_limited = True\n else:\n mint_limited = None # no minting\n\n race_condition_mitigated = 'increaseApproval' in functions_name or\\\n 'safeIncreaseAllowance' in functions_name\n\n return pause, mint_limited, race_condition_mitigated\n\n\n def get_summary_erc20(self, contract):\n txt = ''\n\n pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)\n\n if pause:\n txt += \"\\t\\t Can be paused? : {}\\n\".format(yellow('Yes'))\n else:\n txt += \"\\t\\t Can be paused? : {}\\n\".format(green('No'))\n\n if mint_limited is None:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(green('No Minting'))\n else:\n if mint_limited:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(red('Yes'))\n else:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(yellow('No'))\n\n if race_condition_mitigated:\n txt += \"\\t\\t ERC20 race condition mitigation: {}\\n\".format(green('Yes'))\n else:\n txt += \"\\t\\t ERC20 race condition mitigation: {}\\n\".format(red('No'))\n\n return txt\n\n def _get_detectors_result(self):\n # disable detectors logger\n logger = logging.getLogger('Detectors')\n logger.setLevel(logging.ERROR)\n\n checks_optimization = self.slither.detectors_optimization\n checks_informational = self.slither.detectors_informational\n checks_low = self.slither.detectors_low\n checks_medium = self.slither.detectors_medium\n checks_high = self.slither.detectors_high\n\n issues_optimization = [c.detect() for c in checks_optimization]\n issues_optimization = [c for c in issues_optimization if c]\n issues_optimization = [item for sublist in issues_optimization for item in sublist]\n\n issues_informational = [c.detect() for c in checks_informational]\n issues_informational = [c for c in issues_informational if c]\n issues_informational = [item for sublist in issues_informational for item in sublist]\n\n issues_low = [c.detect() for c in checks_low]\n issues_low = [c for c in issues_low if c]\n issues_low = [item for sublist in issues_low for item in sublist]\n\n issues_medium = (c.detect() for c in checks_medium)\n issues_medium = [c for c in issues_medium if c]\n issues_medium = [item for sublist in issues_medium for item in sublist]\n\n issues_high = [c.detect() for c in checks_high]\n issues_high = [c for c in issues_high if c]\n issues_high = [item for sublist in issues_high for item in sublist]\n\n\n\n return (len(issues_optimization),\n len(issues_informational),\n len(issues_low),\n len(issues_medium),\n len(issues_high))\n\n def get_detectors_result(self):\n issues_optimization, issues_informational, issues_low, issues_medium, issues_high = self._get_detectors_result()\n txt = \"Number of optimization issues: {}\\n\".format(green(issues_optimization))\n txt += \"Number of informational issues: {}\\n\".format(green(issues_informational))\n txt += \"Number of low issues: {}\\n\".format(green(issues_low))\n if issues_medium > 0:\n txt += \"Number of medium issues: {}\\n\".format(yellow(issues_medium))\n else:\n txt += \"Number of medium issues: {}\\n\".format(green(issues_medium))\n if issues_high > 0:\n txt += \"Number of high issues: {}\\n\".format(red(issues_high))\n else:\n txt += \"Number of high issues: {}\\n\\n\".format(green(issues_high))\n\n return 
txt\n\n @staticmethod\n def _is_complex_code(contract):\n for f in contract.functions:\n if compute_cyclomatic_complexity(f) > 7:\n return True\n return False\n\n def is_complex_code(self, contract):\n \"\"\"\n Check if the code is complex\n Heuristic, the code is complex if:\n - One function has a cyclomatic complexity > 7\n Args:\n contract\n \"\"\"\n\n is_complex = self._is_complex_code(contract)\n\n result = red('Yes') if is_complex else green('No')\n\n return \"\\tComplex code? {}\\n\".format(result)\n\n @staticmethod\n def _number_functions(contract):\n return len(contract.functions)\n\n def _lines_number(self):\n if not self.slither.source_code:\n return None\n total_dep_lines = 0\n total_lines = 0\n for filename, source_code in self.slither.source_code.items():\n lines = len(source_code.splitlines())\n is_dep = False\n if self.slither.crytic_compile:\n is_dep = self.slither.crytic_compile.is_dependency(filename)\n if is_dep:\n total_dep_lines += lines\n else:\n total_lines += lines\n return total_lines, total_dep_lines\n\n def _compilation_type(self):\n if self.slither.crytic_compile is None:\n return 'Compilation non standard\\n'\n return f'Compiled with {self.slither.crytic_compile.type}\\n'\n\n def _number_contracts(self):\n if self.slither.crytic_compile is None:\n len(self.slither.contracts), 0\n deps = [c for c in self.slither.contracts if c.is_from_dependency()]\n contracts = [c for c in self.slither.contracts if not c.is_from_dependency()]\n return len(contracts), len(deps)\n\n def _standard_libraries(self):\n libraries = []\n for contract in self.contracts:\n lib = is_standard_library(contract)\n if lib:\n libraries.append(lib)\n\n return libraries\n\n def _ercs(self):\n ercs = []\n for contract in self.contracts:\n ercs += contract.ercs()\n return list(set(ercs))\n\n def output(self, _filename):\n \"\"\"\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n\n txt = \"\\n\"\n txt += self._compilation_type()\n\n results = {\n 'contracts': {\n \"elements\": []\n },\n 'number_lines': 0,\n 'number_lines_in_dependencies': 0,\n 'standard_libraries': [],\n 'ercs': [],\n }\n\n\n lines_number = self._lines_number()\n if lines_number:\n total_lines, total_dep_lines = lines_number\n txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\\n'\n results['number_lines'] = total_lines\n results['number_lines__dependencies'] = total_dep_lines\n\n number_contracts, number_contracts_deps = self._number_contracts()\n txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \\n\\n'\n\n txt += self.get_detectors_result()\n\n libs = self._standard_libraries()\n if libs:\n txt += f'\\nUse: {\", \".join(libs)}\\n'\n results['standard_libraries'] = [str(l) for l in libs]\n\n ercs = self._ercs()\n if ercs:\n txt += f'ERCs: {\", \".join(ercs)}\\n'\n results['ercs'] = [str(e) for e in ercs]\n\n for contract in self.slither.contracts_derived:\n txt += \"\\nContract {}\\n\".format(contract.name)\n txt += self.is_complex_code(contract)\n txt += '\\tNumber of functions: {}\\n'.format(self._number_functions(contract))\n ercs = contract.ercs()\n if ercs:\n txt += '\\tERCs: ' + ','.join(ercs) + '\\n'\n is_erc20 = contract.is_erc20()\n if is_erc20:\n txt += '\\tERC20 info:\\n'\n txt += self.get_summary_erc20(contract)\n\n self.info(txt)\n\n results_contract = output.Output('')\n for contract in self.slither.contracts_derived:\n optimization, info, low, medium, high = self._get_detectors_result()\n contract_d = {'contract_name': 
contract.name,\n 'is_complex_code': self._is_complex_code(contract),\n 'optimization_issues': optimization,\n 'informational_issues': info,\n 'low_issues': low,\n 'medium_issues': medium,\n 'high_issues': high,\n 'is_erc20': contract.is_erc20(),\n 'number_functions': self._number_functions(contract)}\n if contract_d['is_erc20']:\n pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)\n contract_d['erc20_pause'] = pause\n if mint_limited is not None:\n contract_d['erc20_can_mint'] = True\n contract_d['erc20_mint_limited'] = mint_limited\n else:\n contract_d['erc20_can_mint'] = False\n contract_d['erc20_race_condition_mitigated'] = race_condition_mitigated\n\n results_contract.add_contract(contract, additional_fields=contract_d)\n\n results['contracts']['elements'] = results_contract.elements\n\n json = self.generate_output(txt, additional_fields=results)\n\n return json\n\n", "path": "slither/printers/summary/human_summary.py"}]}
| 3,614 | 738 |
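The patch adds a `_get_number_of_assembly_lines` helper to the printer. A standalone sketch of the same counting logic, using only names that appear in the diff (`functions_declared`, `NodeType.ASSEMBLY`, `inline_asm`):

```python
# Standalone sketch of the assembly-line count introduced by the golden diff.
from slither.core.cfg.node import NodeType


def count_assembly_lines(contracts):
    total = 0
    for contract in contracts:
        for function in contract.functions_declared:
            for node in function.nodes:
                # Only inline-assembly CFG nodes contribute to the count.
                if node.type == NodeType.ASSEMBLY and node.inline_asm:
                    total += len(node.inline_asm.splitlines())
    return total
```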
gh_patches_debug_3910 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4935 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Instrumenting ambiguity: Integrate with SQLAlchemy or not when underlying psycopg2 used
<!--
Thanks for taking the time for reporting an issue!
Before reporting an issue on dd-trace-py, please be sure to provide all
necessary information.
If you're hitting a bug, make sure that you're using the latest version of this
library.
-->
### Summary of problem
It's not immediately clear from documentation if going through the trouble of patching and instrumenting SQLAlchemy provides any additional details over the auto-instrumented psycopg2 via `ddtrace-run`.
I currently have a Flask app using SQLAlchemy by hand (not Flask-SQLAlchemy) which connects to a Postgres database via psycopg2. Running my app with `ddtrace-run` in gunicorn successfully reports the Flask and Postgres services to APM. However upon seeing that ddtrace additionally has an [integration to SQLAlchemy](https://ddtrace.readthedocs.io/en/v1.7.1/integrations.html#sqlalchemy), I'm curious if it provides any added benefit on top of the auto instrumentation. Is it meant to be complimentary? Or is it meant to be an alternative/replacement mechanism for instrumenting the database/what psycopg2 integration would report.
I ending up debugging into `ddtrace.contrib.sqlalchemy.engine.EngineTracer` in my application to see what `Pin.get_from(self.engine)` would report without any overrides. It gave me:
```
Pin(service=postgres, tags=None, tracer=<ddtrace.tracer.Tracer object at 0xffff8bf3e580>)
```
which leads me to believe manual patching and integrating with SQLAlchemy is redundant if I'm using pscopg2 and `ddtrace-run`. Eyeballing the `EngineTracer` source code _appears_ to indicate it adds no additional benefit over psychopg2's auto-instrumentation.
Documentation should be updated (hell, I'd love a confirmatory answer here 😃 ) on whether or not one should even bother going to the trouble of manually patching SQLAlchemy in this context if they already have/use/configure SQLAlchemy's engine to connect to a postgres DB provided by psycopg2. Something to the effect, in SQLAlchemy's section:
> # SQLAlchemy
>
> Integrating with SQLAlchemy is not necessary if you're integrating psycopg and SQLAlchemy connects to a Postgres database.
>
> To trace sqlalchemy queries, add instrumentation to the engine class using the patch method that must be called before importing sqlalchemy:
> ...
### Which version of dd-trace-py are you using?
1.7.1
### Which version of pip are you using?
22.3.1
### Which libraries and their versions are you using?
```
Flask==1.1.1
SQLAlchemy==1.3.11
ddtrace==1.7.1
psycopg2-binary==2.9.1
```
### How can we reproduce your problem?
N/A
### What is the result that you get?
N/A
### What is the result that you expected?
N/A
</issue>
<code>
[start of ddtrace/contrib/sqlalchemy/__init__.py]
1 """
2 To trace sqlalchemy queries, add instrumentation to the engine class
3 using the patch method that **must be called before** importing sqlalchemy::
4
5 # patch before importing `create_engine`
6 from ddtrace import Pin, patch
7 patch(sqlalchemy=True)
8
9 # use SQLAlchemy as usual
10 from sqlalchemy import create_engine
11
12 engine = create_engine('sqlite:///:memory:')
13 engine.connect().execute("SELECT COUNT(*) FROM users")
14
15 # Use a PIN to specify metadata related to this engine
16 Pin.override(engine, service='replica-db')
17 """
18 from ...internal.utils.importlib import require_modules
19
20
21 required_modules = ["sqlalchemy", "sqlalchemy.event"]
22
23 with require_modules(required_modules) as missing_modules:
24 if not missing_modules:
25 from .engine import trace_engine
26 from .patch import patch
27 from .patch import unpatch
28
29 __all__ = ["trace_engine", "patch", "unpatch"]
30
[end of ddtrace/contrib/sqlalchemy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py
--- a/ddtrace/contrib/sqlalchemy/__init__.py
+++ b/ddtrace/contrib/sqlalchemy/__init__.py
@@ -1,4 +1,8 @@
"""
+Enabling the SQLAlchemy integration is only necessary if there is no
+instrumentation available or enabled for the underlying database engine (e.g.
+pymysql, psycopg, mysql-connector, etc.).
+
To trace sqlalchemy queries, add instrumentation to the engine class
using the patch method that **must be called before** importing sqlalchemy::
|
{"golden_diff": "diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py\n--- a/ddtrace/contrib/sqlalchemy/__init__.py\n+++ b/ddtrace/contrib/sqlalchemy/__init__.py\n@@ -1,4 +1,8 @@\n \"\"\"\n+Enabling the SQLAlchemy integration is only necessary if there is no\n+instrumentation available or enabled for the underlying database engine (e.g.\n+pymysql, psycopg, mysql-connector, etc.).\n+\n To trace sqlalchemy queries, add instrumentation to the engine class\n using the patch method that **must be called before** importing sqlalchemy::\n", "issue": "Instrumenting ambiguity: Integrate with SQLAlchemy or not when underlying psycopg2 used\n<!--\r\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n-->\r\n\r\n### Summary of problem\r\n\r\nIt's not immediately clear from documentation if going through the trouble of patching and instrumenting SQLAlchemy provides any additional details over the auto-instrumented psycopg2 via `ddtrace-run`.\r\n\r\nI currently have a Flask app using SQLAlchemy by hand (not Flask-SQLAlchemy) which connects to a Postgres database via psycopg2. Running my app with `ddtrace-run` in gunicorn successfully reports the Flask and Postgres services to APM. However upon seeing that ddtrace additionally has an [integration to SQLAlchemy](https://ddtrace.readthedocs.io/en/v1.7.1/integrations.html#sqlalchemy), I'm curious if it provides any added benefit on top of the auto instrumentation. Is it meant to be complimentary? Or is it meant to be an alternative/replacement mechanism for instrumenting the database/what psycopg2 integration would report.\r\n\r\nI ending up debugging into `ddtrace.contrib.sqlalchemy.engine.EngineTracer` in my application to see what `Pin.get_from(self.engine)` would report without any overrides. It gave me:\r\n\r\n```\r\nPin(service=postgres, tags=None, tracer=<ddtrace.tracer.Tracer object at 0xffff8bf3e580>)\r\n```\r\n\r\nwhich leads me to believe manual patching and integrating with SQLAlchemy is redundant if I'm using pscopg2 and `ddtrace-run`. Eyeballing the `EngineTracer` source code _appears_ to indicate it adds no additional benefit over psychopg2's auto-instrumentation.\r\n\r\nDocumentation should be updated (hell, I'd love a confirmatory answer here \ud83d\ude03 ) on whether or not one should even bother going to the trouble of manually patching SQLAlchemy in this context if they already have/use/configure SQLAlchemy's engine to connect to a postgres DB provided by psycopg2. 
Something to the effect, in SQLAlchemy's section:\r\n\r\n> # SQLAlchemy\r\n> \r\n> Integrating with SQLAlchemy is not necessary if you're integrating psycopg and SQLAlchemy connects to a Postgres database.\r\n>\r\n> To trace sqlalchemy queries, add instrumentation to the engine class using the patch method that must be called before importing sqlalchemy:\r\n> ...\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n1.7.1\r\n\r\n### Which version of pip are you using?\r\n\r\n22.3.1\r\n\r\n### Which libraries and their versions are you using?\r\n\r\n```\r\nFlask==1.1.1\r\nSQLAlchemy==1.3.11\r\nddtrace==1.7.1\r\npsycopg2-binary==2.9.1\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\nN/A\r\n\r\n### What is the result that you get?\r\n\r\nN/A\r\n\r\n### What is the result that you expected?\r\n\r\nN/A\n", "before_files": [{"content": "\"\"\"\nTo trace sqlalchemy queries, add instrumentation to the engine class\nusing the patch method that **must be called before** importing sqlalchemy::\n\n # patch before importing `create_engine`\n from ddtrace import Pin, patch\n patch(sqlalchemy=True)\n\n # use SQLAlchemy as usual\n from sqlalchemy import create_engine\n\n engine = create_engine('sqlite:///:memory:')\n engine.connect().execute(\"SELECT COUNT(*) FROM users\")\n\n # Use a PIN to specify metadata related to this engine\n Pin.override(engine, service='replica-db')\n\"\"\"\nfrom ...internal.utils.importlib import require_modules\n\n\nrequired_modules = [\"sqlalchemy\", \"sqlalchemy.event\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .engine import trace_engine\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"trace_engine\", \"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/sqlalchemy/__init__.py"}]}
| 1,435 | 132 |
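The change here is documentation-only: it states that patching SQLAlchemy is needed only when no driver-level integration (psycopg, pymysql, mysql-connector, etc.) is available or enabled. A minimal sketch of the one case the added sentence still covers, mirroring the docstring shown in the record above; with psycopg2 under `ddtrace-run` this step is redundant:

```python
# Sketch of the case the new doc sentence says still needs manual patching:
# an engine whose underlying driver has no ddtrace integration enabled.
from ddtrace import patch

patch(sqlalchemy=True)  # must run before importing create_engine

from sqlalchemy import create_engine

engine = create_engine("sqlite:///:memory:")
engine.connect().execute("SELECT COUNT(*) FROM users")
```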
gh_patches_debug_19439 | rasdani/github-patches | git_diff | geopandas__geopandas-1138 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IndexError when conducting spatial join with empty polygons
Hey, not sure if this addressed elsewhere, but a search of past results suggests it's not.
```python
import geopandas, numpy, shapely.geometry as geom
center = geom.Point((0,0))
circle = center.buffer(1)
empty = center.buffer(0)
empty_df = geopandas.GeoDataFrame(geometry=[circle, empty])
points = [geom.Point(x) for x in numpy.random.random(size=(10,2))]
points_df = geopandas.GeoDataFrame(geometry=points)
geopandas.sjoin(points_df, empty_df, op='intersects') # raises an IndexError but completes
```
I get a result from this join, but I also get an `IndexError`:
```
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
_ctypes/callbacks.c in 'calling callback function'()
~/anaconda3/envs/ana/lib/python3.7/site-packages/rtree/index.py in py_next_item(p_id, p_mins, p_maxs, p_dimension, p_data, p_length)
708 # xmin, xmax, ymin, ymax, zmin, zmax
709 for i in range(dimension):
--> 710 mins[i] = coordinates[i*2]
711 maxs[i] = coordinates[(i*2)+1]
712
IndexError: list index out of range
```
Not sure how this `IndexError` relates to the result, but I didn't see another issue mentioning this, so I thought I'd raise it.
rtree 0.8.3
geopandas 0.4.1
shapely 1.6.4.post1
</issue>
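The root cause is easier to see once you inspect what Shapely reports for an empty geometry: its `bounds` is an empty tuple, which the rtree callback then indexes past. A minimal check, assuming only shapely is installed:

```python
import shapely.geometry as geom

center = geom.Point((0, 0))
circle = center.buffer(1)
empty = center.buffer(0)   # a zero-radius buffer yields an empty polygon

print(circle.bounds)  # (-1.0, -1.0, 1.0, 1.0)
print(empty.bounds)   # () -- rtree then fails on coordinates[i * 2]
```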
<code>
[start of geopandas/tools/sjoin.py]
1 from warnings import warn
2
3 import numpy as np
4 import pandas as pd
5
6 from shapely import prepared
7
8 from geopandas import GeoDataFrame
9
10
11 def sjoin(
12 left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
13 ):
14 """Spatial join of two GeoDataFrames.
15
16 Parameters
17 ----------
18 left_df, right_df : GeoDataFrames
19 how : string, default 'inner'
20 The type of join:
21
22 * 'left': use keys from left_df; retain only left_df geometry column
23 * 'right': use keys from right_df; retain only right_df geometry column
24 * 'inner': use intersection of keys from both dfs; retain only
25 left_df geometry column
26 op : string, default 'intersection'
27 Binary predicate, one of {'intersects', 'contains', 'within'}.
28 See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
29 lsuffix : string, default 'left'
30 Suffix to apply to overlapping column names (left GeoDataFrame).
31 rsuffix : string, default 'right'
32 Suffix to apply to overlapping column names (right GeoDataFrame).
33
34 """
35 if not isinstance(left_df, GeoDataFrame):
36 raise ValueError(
37 "'left_df' should be GeoDataFrame, got {}".format(type(left_df))
38 )
39
40 if not isinstance(right_df, GeoDataFrame):
41 raise ValueError(
42 "'right_df' should be GeoDataFrame, got {}".format(type(right_df))
43 )
44
45 allowed_hows = ["left", "right", "inner"]
46 if how not in allowed_hows:
47 raise ValueError(
48 '`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
49 )
50
51 allowed_ops = ["contains", "within", "intersects"]
52 if op not in allowed_ops:
53 raise ValueError(
54 '`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
55 )
56
57 if left_df.crs != right_df.crs:
58 warn(
59 (
60 "CRS of frames being joined does not match!"
61 "(%s != %s)" % (left_df.crs, right_df.crs)
62 )
63 )
64
65 index_left = "index_%s" % lsuffix
66 index_right = "index_%s" % rsuffix
67
68 # due to GH 352
69 if any(left_df.columns.isin([index_left, index_right])) or any(
70 right_df.columns.isin([index_left, index_right])
71 ):
72 raise ValueError(
73 "'{0}' and '{1}' cannot be names in the frames being"
74 " joined".format(index_left, index_right)
75 )
76
77 # Attempt to re-use spatial indexes, otherwise generate the spatial index
78 # for the longer dataframe
79 if right_df._sindex_generated or (
80 not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
81 ):
82 tree_idx = right_df.sindex
83 tree_idx_right = True
84 else:
85 tree_idx = left_df.sindex
86 tree_idx_right = False
87
88 # the rtree spatial index only allows limited (numeric) index types, but an
89 # index in geopandas may be any arbitrary dtype. so reset both indices now
90 # and store references to the original indices, to be reaffixed later.
91 # GH 352
92 left_df = left_df.copy(deep=True)
93 left_df.index = left_df.index.rename(index_left)
94 left_df = left_df.reset_index()
95 right_df = right_df.copy(deep=True)
96 right_df.index = right_df.index.rename(index_right)
97 right_df = right_df.reset_index()
98
99 if op == "within":
100 # within implemented as the inverse of contains; swap names
101 left_df, right_df = right_df, left_df
102 tree_idx_right = not tree_idx_right
103
104 r_idx = np.empty((0, 0))
105 l_idx = np.empty((0, 0))
106 # get rtree spatial index
107 if tree_idx_right:
108 idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
109 lambda x: list(tree_idx.intersection(x))
110 )
111 idxmatch = idxmatch[idxmatch.apply(len) > 0]
112 # indexes of overlapping boundaries
113 if idxmatch.shape[0] > 0:
114 r_idx = np.concatenate(idxmatch.values)
115 l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
116 else:
117 # tree_idx_df == 'left'
118 idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
119 lambda x: list(tree_idx.intersection(x))
120 )
121 idxmatch = idxmatch[idxmatch.apply(len) > 0]
122 if idxmatch.shape[0] > 0:
123 # indexes of overlapping boundaries
124 l_idx = np.concatenate(idxmatch.values)
125 r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
126
127 if len(r_idx) > 0 and len(l_idx) > 0:
128 # Vectorize predicate operations
129 def find_intersects(a1, a2):
130 return a1.intersects(a2)
131
132 def find_contains(a1, a2):
133 return a1.contains(a2)
134
135 predicate_d = {
136 "intersects": find_intersects,
137 "contains": find_contains,
138 "within": find_contains,
139 }
140
141 check_predicates = np.vectorize(predicate_d[op])
142
143 result = pd.DataFrame(
144 np.column_stack(
145 [
146 l_idx,
147 r_idx,
148 check_predicates(
149 left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
150 right_df[right_df.geometry.name][r_idx],
151 ),
152 ]
153 )
154 )
155
156 result.columns = ["_key_left", "_key_right", "match_bool"]
157 result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
158 "match_bool", axis=1
159 )
160
161 else:
162 # when output from the join has no overlapping geometries
163 result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
164
165 if op == "within":
166 # within implemented as the inverse of contains; swap names
167 left_df, right_df = right_df, left_df
168 result = result.rename(
169 columns={"_key_left": "_key_right", "_key_right": "_key_left"}
170 )
171
172 if how == "inner":
173 result = result.set_index("_key_left")
174 joined = left_df.merge(result, left_index=True, right_index=True).merge(
175 right_df.drop(right_df.geometry.name, axis=1),
176 left_on="_key_right",
177 right_index=True,
178 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
179 )
180 joined = joined.set_index(index_left).drop(["_key_right"], axis=1)
181 joined.index.name = None
182 elif how == "left":
183 result = result.set_index("_key_left")
184 joined = left_df.merge(
185 result, left_index=True, right_index=True, how="left"
186 ).merge(
187 right_df.drop(right_df.geometry.name, axis=1),
188 how="left",
189 left_on="_key_right",
190 right_index=True,
191 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
192 )
193 joined = joined.set_index(index_left).drop(["_key_right"], axis=1)
194 joined.index.name = None
195 else: # how == 'right':
196 joined = (
197 left_df.drop(left_df.geometry.name, axis=1)
198 .merge(
199 result.merge(
200 right_df, left_on="_key_right", right_index=True, how="right"
201 ),
202 left_index=True,
203 right_on="_key_left",
204 how="right",
205 )
206 .set_index(index_right)
207 )
208 joined = joined.drop(["_key_left", "_key_right"], axis=1)
209
210 return joined
211
[end of geopandas/tools/sjoin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py
--- a/geopandas/tools/sjoin.py
+++ b/geopandas/tools/sjoin.py
@@ -106,7 +106,7 @@
# get rtree spatial index
if tree_idx_right:
idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
- lambda x: list(tree_idx.intersection(x))
+ lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
# indexes of overlapping boundaries
@@ -116,7 +116,7 @@
else:
# tree_idx_df == 'left'
idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
- lambda x: list(tree_idx.intersection(x))
+ lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
if idxmatch.shape[0] > 0:
|
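With that guard in place, a row whose geometry has empty bounds simply yields an empty candidate list instead of being handed to the rtree index. The patched expression in isolation (the `tree_idx` argument here is a stand-in for the spatial index built inside `sjoin`):

```python
# Illustration of the guarded lookup added by the patch.
def candidate_ids(bounds, tree_idx):
    # Empty geometries report bounds == (), which rtree cannot handle.
    return list(tree_idx.intersection(bounds)) if not bounds == () else []

print(candidate_ids((), tree_idx=None))  # [] -- empty bounds short-circuit
```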
{"golden_diff": "diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py\n--- a/geopandas/tools/sjoin.py\n+++ b/geopandas/tools/sjoin.py\n@@ -106,7 +106,7 @@\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n- lambda x: list(tree_idx.intersection(x))\n+ lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n@@ -116,7 +116,7 @@\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n- lambda x: list(tree_idx.intersection(x))\n+ lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n", "issue": "IndexError when conducting spatial join with empty polygons\nHey, not sure if this addressed elsewhere, but a search of past results suggests it's not. \r\n\r\n```python\r\nimport geopandas, numpy, shapely.geometry as geom\r\n\r\ncenter = geom.Point((0,0))\r\ncircle = center.buffer(1)\r\nempty = center.buffer(0)\r\n\r\nempty_df = geopandas.GeoDataFrame(geometry=[circle, empty])\r\n\r\npoints = [geom.Point(x) for x in numpy.random.random(size=(10,2))]\r\npoints_df = geopandas.GeoDataFrame(geometry=points)\r\n\r\ngeopandas.sjoin(points_df, empty_df, op='intersects') # raises an IndexError but completes\r\n```\r\nI get a result from this join, but I also get an `IndexError`:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nIndexError Traceback (most recent call last)\r\n_ctypes/callbacks.c in 'calling callback function'()\r\n\r\n~/anaconda3/envs/ana/lib/python3.7/site-packages/rtree/index.py in py_next_item(p_id, p_mins, p_maxs, p_dimension, p_data, p_length)\r\n 708 # xmin, xmax, ymin, ymax, zmin, zmax\r\n 709 for i in range(dimension):\r\n--> 710 mins[i] = coordinates[i*2]\r\n 711 maxs[i] = coordinates[(i*2)+1]\r\n 712 \r\n\r\nIndexError: list index out of range\r\n```\r\nNot sure how this `IndexError` relates to the result, but I didn't see another issue mentioning this, so I thought I'd raise it. 
\r\n\r\nrtree 0.8.3\r\ngeopandas 0.4.1\r\nshapely 1.6.4.post1\r\n\n", "before_files": [{"content": "from warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n if left_df.crs != right_df.crs:\n warn(\n (\n \"CRS of frames being joined does not match!\"\n \"(%s != %s)\" % (left_df.crs, right_df.crs)\n )\n )\n\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex\n tree_idx_right = False\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n right_df = right_df.copy(deep=True)\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x))\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x))\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = pd.DataFrame(\n np.column_stack(\n [\n l_idx,\n r_idx,\n check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n ),\n ]\n )\n )\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = left_df.merge(result, left_index=True, right_index=True).merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n joined = joined.set_index(index_left).drop([\"_key_right\"], axis=1)\n joined.index.name = None\n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n joined = left_df.merge(\n result, left_index=True, right_index=True, how=\"left\"\n ).merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n joined = joined.set_index(index_left).drop([\"_key_right\"], axis=1)\n joined.index.name = None\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n how=\"right\",\n )\n .set_index(index_right)\n 
)\n joined = joined.drop([\"_key_left\", \"_key_right\"], axis=1)\n\n return joined\n", "path": "geopandas/tools/sjoin.py"}]}
num_tokens_prompt: 3,197 | num_tokens_diff: 237

problem_id: gh_patches_debug_7766 | source: rasdani/github-patches | task_type: git_diff | in_source_id: mozmeao__snippets-service-889

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Return empty json for empty bundles in ASR
Return empty json for empty bundles in ASR. Unclear whether ASR can handle 204 responses so let's default to empty json.
See also https://bugzilla.mozilla.org/show_bug.cgi?id=1511640
History:
- https://github.com/mozmeao/snippets-service/commit/4936f2371e341690def9be40a72abdd32205b0c9
- https://github.com/mozmeao/snippets-service/commit/cb0fc0ca63af4ed3786342e17c2f50d2fc6dc5c4
</issue>
<code>
[start of snippets/base/views.py]
1 import json
2 import logging
3
4 from distutils.util import strtobool
5
6 from django.conf import settings
7 from django.contrib.auth.decorators import permission_required
8 from django.core.exceptions import ValidationError
9 from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
10 from django.shortcuts import get_object_or_404, render
11 from django.utils.functional import lazy
12 from django.views.decorators.cache import cache_control
13 from django.views.decorators.csrf import csrf_exempt
14 from django.views.decorators.http import require_POST
15 from django.views.generic import TemplateView
16
17 from django_statsd.clients import statsd
18 from raven.contrib.django.models import client as sentry_client
19
20 from snippets.base import util
21 from snippets.base.bundles import ASRSnippetBundle, SnippetBundle
22 from snippets.base.decorators import access_control
23 from snippets.base.encoders import JSONSnippetEncoder
24 from snippets.base.models import ASRSnippet, Client, JSONSnippet, Snippet, SnippetTemplate
25 from snippets.base.util import get_object_or_none
26
27
28 def _bundle_timeout():
29 return getattr(settings, 'SNIPPET_BUNDLE_TIMEOUT')
30 SNIPPET_BUNDLE_TIMEOUT = lazy(_bundle_timeout, int)() # noqa
31
32
33 class HomeView(TemplateView):
34 template_name = 'base/home.jinja'
35
36
37 @cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)
38 @access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)
39 def fetch_snippets(request, **kwargs):
40 """
41 Return one of the following responses:
42 - 200 with empty body when the bundle is empty
43 - 302 to a bundle URL after generating it if not cached.
44 """
45 statsd.incr('serve.snippets')
46
47 client = Client(**kwargs)
48 if client.startpage_version == 6:
49 bundle = ASRSnippetBundle(client)
50 else:
51 bundle = SnippetBundle(client)
52 if bundle.empty:
53 statsd.incr('bundle.empty')
54 # This is not a 204 because Activity Stream expects content, even if
55 # it's empty.
56 return HttpResponse(status=200, content='')
57 elif bundle.cached:
58 statsd.incr('bundle.cached')
59 else:
60 statsd.incr('bundle.generate')
61 bundle.generate()
62
63 return HttpResponseRedirect(bundle.url)
64
65
66 @cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)
67 @access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)
68 def fetch_json_snippets(request, **kwargs):
69 statsd.incr('serve.json_snippets')
70 client = Client(**kwargs)
71 matching_snippets = (JSONSnippet.objects
72 .filter(published=True)
73 .match_client(client)
74 .filter_by_available())
75 return HttpResponse(json.dumps(matching_snippets, cls=JSONSnippetEncoder),
76 content_type='application/json')
77
78
79 def preview_asr_snippet(request, uuid):
80 try:
81 snippet = get_object_or_404(ASRSnippet, uuid=uuid)
82 except ValidationError:
83 # Raised when UUID is a badly formed hexadecimal UUID string
84 raise Http404()
85
86 bundle_content = json.dumps({
87 'messages': [snippet.render(preview=True)],
88 })
89 return HttpResponse(bundle_content, content_type='application/json')
90
91
92 @csrf_exempt
93 @permission_required('base.change_snippet')
94 def preview_snippet(request):
95 """
96 Build a snippet using info from the POST parameters, and preview that
97 snippet on a mock about:home page.
98 """
99 try:
100 template_id = int(request.POST.get('template_id', None))
101 except (TypeError, ValueError):
102 return HttpResponseBadRequest()
103
104 template = get_object_or_none(SnippetTemplate, id=template_id)
105 data = request.POST.get('data', None)
106
107 # Validate that data is JSON.
108 try:
109 json.loads(data)
110 except (TypeError, ValueError):
111 data = None
112
113 # If your parameters are wrong, I have no sympathy for you.
114 if data is None or template is None:
115 return HttpResponseBadRequest()
116
117 # Build a snippet that isn't saved so we can render it.
118 snippet = Snippet(template=template, data=data)
119
120 if strtobool(request.POST.get('activity_stream', 'false')):
121 template_name = 'base/preview_as.jinja'
122 preview_client = Client(5, 'Firefox', '57.0', 'default', 'default', 'en-US',
123 'release', 'default', 'default', 'default')
124 else:
125 template_name = 'base/preview.jinja'
126 preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',
127 'release', 'default', 'default', 'default')
128
129 skip_boilerplate = request.POST.get('skip_boilerplate', 'false')
130 skip_boilerplate = strtobool(skip_boilerplate)
131 if skip_boilerplate:
132 template_name = 'base/preview_without_shell.jinja'
133
134 return render(request, template_name, {
135 'snippets_json': json.dumps([snippet.to_dict()]),
136 'client': preview_client,
137 'preview': True,
138 'current_firefox_major_version': util.current_firefox_major_version(),
139 })
140
141
142 def show_snippet(request, snippet_id, uuid=False):
143 preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',
144 'release', 'default', 'default', 'default')
145
146 if uuid:
147 snippet = get_object_or_404(Snippet, uuid=snippet_id)
148 else:
149 snippet = get_object_or_404(Snippet, pk=snippet_id)
150 if not snippet.published and not request.user.is_authenticated:
151 raise Http404()
152
153 template = 'base/preview.jinja'
154 if snippet.on_startpage_5:
155 template = 'base/preview_as.jinja'
156 return render(request, template, {
157 'snippets_json': json.dumps([snippet.to_dict()]),
158 'client': preview_client,
159 'preview': True,
160 'current_firefox_major_version': util.current_firefox_major_version(),
161 })
162
163
164 @csrf_exempt
165 @require_POST
166 def csp_violation_capture(request):
167 data = sentry_client.get_data_from_request(request)
168 data.update({
169 'level': logging.INFO,
170 'logger': 'CSP',
171 })
172 try:
173 csp_data = json.loads(request.body)
174 except ValueError:
175 # Cannot decode CSP violation data, ignore
176 return HttpResponseBadRequest('Invalid CSP Report')
177
178 try:
179 blocked_uri = csp_data['csp-report']['blocked-uri']
180 except KeyError:
181 # Incomplete CSP report
182 return HttpResponseBadRequest('Incomplete CSP Report')
183
184 sentry_client.captureMessage(
185 message='CSP Violation: {}'.format(blocked_uri),
186 data=data)
187
188 return HttpResponse('Captured CSP violation, thanks for reporting.')
189
[end of snippets/base/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/snippets/base/views.py b/snippets/base/views.py
--- a/snippets/base/views.py
+++ b/snippets/base/views.py
@@ -51,6 +51,11 @@
bundle = SnippetBundle(client)
if bundle.empty:
statsd.incr('bundle.empty')
+
+ if client.startpage_version == 6:
+ # Return valid JSON for Activity Stream Router
+ return HttpResponse(status=200, content='{}')
+
# This is not a 204 because Activity Stream expects content, even if
# it's empty.
return HttpResponse(status=200, content='')
|
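The practical difference made by the patch is that the empty-bundle body now parses as JSON, which is presumably what an Activity Stream Router consumer expects. A self-contained illustration:

```python
import json

# New behaviour for startpage_version == 6: the body is valid JSON.
assert json.loads("{}") == {}

# Old behaviour: an empty body is not parseable JSON at all.
try:
    json.loads("")
except json.JSONDecodeError:
    print("empty string is not valid JSON")
```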
{"golden_diff": "diff --git a/snippets/base/views.py b/snippets/base/views.py\n--- a/snippets/base/views.py\n+++ b/snippets/base/views.py\n@@ -51,6 +51,11 @@\n bundle = SnippetBundle(client)\n if bundle.empty:\n statsd.incr('bundle.empty')\n+\n+ if client.startpage_version == 6:\n+ # Return valid JSON for Activity Stream Router\n+ return HttpResponse(status=200, content='{}')\n+\n # This is not a 204 because Activity Stream expects content, even if\n # it's empty.\n return HttpResponse(status=200, content='')\n", "issue": "Return empty json for empty bundles in ASR\nReturn empty json for empty bundles in ASR. Unclear whether ASR can handle 204 responses so let's default to empty json. \r\n\r\nSee also https://bugzilla.mozilla.org/show_bug.cgi?id=1511640\r\n\r\nHistory:\r\n - https://github.com/mozmeao/snippets-service/commit/4936f2371e341690def9be40a72abdd32205b0c9\r\n - https://github.com/mozmeao/snippets-service/commit/cb0fc0ca63af4ed3786342e17c2f50d2fc6dc5c4\n", "before_files": [{"content": "import json\nimport logging\n\nfrom distutils.util import strtobool\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.exceptions import ValidationError\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.functional import lazy\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import TemplateView\n\nfrom django_statsd.clients import statsd\nfrom raven.contrib.django.models import client as sentry_client\n\nfrom snippets.base import util\nfrom snippets.base.bundles import ASRSnippetBundle, SnippetBundle\nfrom snippets.base.decorators import access_control\nfrom snippets.base.encoders import JSONSnippetEncoder\nfrom snippets.base.models import ASRSnippet, Client, JSONSnippet, Snippet, SnippetTemplate\nfrom snippets.base.util import get_object_or_none\n\n\ndef _bundle_timeout():\n return getattr(settings, 'SNIPPET_BUNDLE_TIMEOUT')\nSNIPPET_BUNDLE_TIMEOUT = lazy(_bundle_timeout, int)() # noqa\n\n\nclass HomeView(TemplateView):\n template_name = 'base/home.jinja'\n\n\n@cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)\n@access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)\ndef fetch_snippets(request, **kwargs):\n \"\"\"\n Return one of the following responses:\n - 200 with empty body when the bundle is empty\n - 302 to a bundle URL after generating it if not cached.\n \"\"\"\n statsd.incr('serve.snippets')\n\n client = Client(**kwargs)\n if client.startpage_version == 6:\n bundle = ASRSnippetBundle(client)\n else:\n bundle = SnippetBundle(client)\n if bundle.empty:\n statsd.incr('bundle.empty')\n # This is not a 204 because Activity Stream expects content, even if\n # it's empty.\n return HttpResponse(status=200, content='')\n elif bundle.cached:\n statsd.incr('bundle.cached')\n else:\n statsd.incr('bundle.generate')\n bundle.generate()\n\n return HttpResponseRedirect(bundle.url)\n\n\n@cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)\n@access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)\ndef fetch_json_snippets(request, **kwargs):\n statsd.incr('serve.json_snippets')\n client = Client(**kwargs)\n matching_snippets = (JSONSnippet.objects\n .filter(published=True)\n .match_client(client)\n .filter_by_available())\n return HttpResponse(json.dumps(matching_snippets, 
cls=JSONSnippetEncoder),\n content_type='application/json')\n\n\ndef preview_asr_snippet(request, uuid):\n try:\n snippet = get_object_or_404(ASRSnippet, uuid=uuid)\n except ValidationError:\n # Raised when UUID is a badly formed hexadecimal UUID string\n raise Http404()\n\n bundle_content = json.dumps({\n 'messages': [snippet.render(preview=True)],\n })\n return HttpResponse(bundle_content, content_type='application/json')\n\n\n@csrf_exempt\n@permission_required('base.change_snippet')\ndef preview_snippet(request):\n \"\"\"\n Build a snippet using info from the POST parameters, and preview that\n snippet on a mock about:home page.\n \"\"\"\n try:\n template_id = int(request.POST.get('template_id', None))\n except (TypeError, ValueError):\n return HttpResponseBadRequest()\n\n template = get_object_or_none(SnippetTemplate, id=template_id)\n data = request.POST.get('data', None)\n\n # Validate that data is JSON.\n try:\n json.loads(data)\n except (TypeError, ValueError):\n data = None\n\n # If your parameters are wrong, I have no sympathy for you.\n if data is None or template is None:\n return HttpResponseBadRequest()\n\n # Build a snippet that isn't saved so we can render it.\n snippet = Snippet(template=template, data=data)\n\n if strtobool(request.POST.get('activity_stream', 'false')):\n template_name = 'base/preview_as.jinja'\n preview_client = Client(5, 'Firefox', '57.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n else:\n template_name = 'base/preview.jinja'\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n skip_boilerplate = request.POST.get('skip_boilerplate', 'false')\n skip_boilerplate = strtobool(skip_boilerplate)\n if skip_boilerplate:\n template_name = 'base/preview_without_shell.jinja'\n\n return render(request, template_name, {\n 'snippets_json': json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\ndef show_snippet(request, snippet_id, uuid=False):\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n if uuid:\n snippet = get_object_or_404(Snippet, uuid=snippet_id)\n else:\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n if not snippet.published and not request.user.is_authenticated:\n raise Http404()\n\n template = 'base/preview.jinja'\n if snippet.on_startpage_5:\n template = 'base/preview_as.jinja'\n return render(request, template, {\n 'snippets_json': json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\n@csrf_exempt\n@require_POST\ndef csp_violation_capture(request):\n data = sentry_client.get_data_from_request(request)\n data.update({\n 'level': logging.INFO,\n 'logger': 'CSP',\n })\n try:\n csp_data = json.loads(request.body)\n except ValueError:\n # Cannot decode CSP violation data, ignore\n return HttpResponseBadRequest('Invalid CSP Report')\n\n try:\n blocked_uri = csp_data['csp-report']['blocked-uri']\n except KeyError:\n # Incomplete CSP report\n return HttpResponseBadRequest('Incomplete CSP Report')\n\n sentry_client.captureMessage(\n message='CSP Violation: {}'.format(blocked_uri),\n data=data)\n\n return HttpResponse('Captured CSP violation, thanks for reporting.')\n", "path": "snippets/base/views.py"}]}
| 2,641 | 141 |
gh_patches_debug_24793
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-4095
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reduce warnings in unit tests
### Motivation
The Optuna's unit tests raise many warnings, which make it hard to recognize unexpected warning messages.
### Description
We can filter expected warnings using `@pytest.mark.filterwarnings` or `warnings.simplefilter("ignore")`. Some warnings may be unexpected or unsuitable. We should fix tests or Optuna's codes in such cases.
This issue is contribution-welcome. We welcome any fixes, even how trivial.
### Alternatives (optional)
_No response_
### Additional context (optional)
_No response_
</issue>
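The two mechanisms the issue names look roughly like this in a test suite (a generic sketch, not tied to any specific Optuna test):

```python
import warnings

import pytest


# Option 1: declare the expected warning at the test level.
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_emits_an_expected_warning():
    warnings.warn("scheduled for removal", FutureWarning)


# Option 2: silence expected warnings locally inside the test body.
def test_silences_warnings_locally():
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        warnings.warn("noisy but expected", UserWarning)
```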
<code>
[start of optuna/visualization/matplotlib/_param_importances.py]
1 from typing import Callable
2 from typing import List
3 from typing import Optional
4
5 import numpy as np
6
7 from optuna._experimental import experimental_func
8 from optuna.importance._base import BaseImportanceEvaluator
9 from optuna.logging import get_logger
10 from optuna.study import Study
11 from optuna.trial import FrozenTrial
12 from optuna.visualization._param_importances import _get_importances_info
13 from optuna.visualization._param_importances import _ImportancesInfo
14 from optuna.visualization.matplotlib._matplotlib_imports import _imports
15
16
17 if _imports.is_successful():
18 from optuna.visualization.matplotlib._matplotlib_imports import Axes
19 from optuna.visualization.matplotlib._matplotlib_imports import cm
20 from optuna.visualization.matplotlib._matplotlib_imports import plt
21
22
23 _logger = get_logger(__name__)
24
25
26 AXES_PADDING_RATIO = 1.05
27
28
29 @experimental_func("2.2.0")
30 def plot_param_importances(
31 study: Study,
32 evaluator: Optional[BaseImportanceEvaluator] = None,
33 params: Optional[List[str]] = None,
34 *,
35 target: Optional[Callable[[FrozenTrial], float]] = None,
36 target_name: str = "Objective Value",
37 ) -> "Axes":
38 """Plot hyperparameter importances with Matplotlib.
39
40 .. seealso::
41 Please refer to :func:`optuna.visualization.plot_param_importances` for an example.
42
43 Example:
44
45 The following code snippet shows how to plot hyperparameter importances.
46
47 .. plot::
48
49 import optuna
50
51
52 def objective(trial):
53 x = trial.suggest_int("x", 0, 2)
54 y = trial.suggest_float("y", -1.0, 1.0)
55 z = trial.suggest_float("z", 0.0, 1.5)
56 return x ** 2 + y ** 3 - z ** 4
57
58
59 sampler = optuna.samplers.RandomSampler(seed=10)
60 study = optuna.create_study(sampler=sampler)
61 study.optimize(objective, n_trials=100)
62
63 optuna.visualization.matplotlib.plot_param_importances(study)
64
65 Args:
66 study:
67 An optimized study.
68 evaluator:
69 An importance evaluator object that specifies which algorithm to base the importance
70 assessment on.
71 Defaults to
72 :class:`~optuna.importance.FanovaImportanceEvaluator`.
73 params:
74 A list of names of parameters to assess.
75 If :obj:`None`, all parameters that are present in all of the completed trials are
76 assessed.
77 target:
78 A function to specify the value to display. If it is :obj:`None` and ``study`` is being
79 used for single-objective optimization, the objective values are plotted.
80
81 .. note::
82 Specify this argument if ``study`` is being used for multi-objective
83 optimization. For example, to get the hyperparameter importance of the first
84 objective, use ``target=lambda t: t.values[0]`` for the target parameter.
85 target_name:
86 Target's name to display on the axis label.
87
88 Returns:
89 A :class:`matplotlib.axes.Axes` object.
90 """
91
92 _imports.check()
93
94 importances_info = _get_importances_info(study, evaluator, params, target, target_name)
95 return _get_importances_plot(importances_info)
96
97
98 def _get_importances_plot(info: _ImportancesInfo) -> "Axes":
99 # Set up the graph style.
100 plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
101 fig, ax = plt.subplots()
102 ax.set_title("Hyperparameter Importances")
103 ax.set_xlabel(f"Importance for {info.target_name}")
104 ax.set_ylabel("Hyperparameter")
105
106 param_names = info.param_names
107 pos = np.arange(len(param_names))
108 importance_values = info.importance_values
109
110 if len(importance_values) == 0:
111 return ax
112
113 # Draw horizontal bars.
114 ax.barh(
115 pos,
116 importance_values,
117 align="center",
118 color=cm.get_cmap("tab20c")(0),
119 tick_label=param_names,
120 )
121
122 renderer = fig.canvas.get_renderer()
123 for idx, (val, label) in enumerate(zip(importance_values, info.importance_labels)):
124 text = ax.text(val, idx, label, va="center")
125
126 # Sometimes horizontal axis needs to be re-scaled
127 # to avoid text going over plot area.
128 bbox = text.get_window_extent(renderer)
129 bbox = bbox.transformed(ax.transData.inverted())
130 _, plot_xmax = ax.get_xlim()
131 bbox_xmax = bbox.xmax
132
133 if bbox_xmax > plot_xmax:
134 ax.set_xlim(xmax=AXES_PADDING_RATIO * bbox_xmax)
135
136 return ax
137
[end of optuna/visualization/matplotlib/_param_importances.py]
[start of optuna/visualization/matplotlib/_matplotlib_imports.py]
1 from packaging import version
2
3 from optuna._imports import try_import
4
5
6 with try_import() as _imports: # NOQA
7 # TODO(ytknzw): Add specific imports.
8 import matplotlib # NOQA
9 from matplotlib import __version__ as matplotlib_version
10 from matplotlib import cm # NOQA
11 from matplotlib import pyplot as plt # NOQA
12 from matplotlib.axes._axes import Axes # NOQA
13 from matplotlib.collections import LineCollection # NOQA
14 from matplotlib.collections import PathCollection # NOQA
15 from matplotlib.colors import Colormap # NOQA
16 from matplotlib.contour import ContourSet # NOQA
17 from matplotlib.patches import Rectangle # NOQA
18
19 # TODO(ytknzw): Set precise version.
20 if version.parse(matplotlib_version) < version.parse("3.0.0"):
21 raise ImportError(
22 "Your version of Matplotlib is " + matplotlib_version + " . "
23 "Please install Matplotlib version 3.0.0 or higher. "
24 "Matplotlib can be installed by executing `$ pip install -U matplotlib>=3.0.0`. "
25 "For further information, please refer to the installation guide of Matplotlib. ",
26 name="matplotlib",
27 )
28
[end of optuna/visualization/matplotlib/_matplotlib_imports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optuna/visualization/matplotlib/_matplotlib_imports.py b/optuna/visualization/matplotlib/_matplotlib_imports.py
--- a/optuna/visualization/matplotlib/_matplotlib_imports.py
+++ b/optuna/visualization/matplotlib/_matplotlib_imports.py
@@ -7,7 +7,6 @@
# TODO(ytknzw): Add specific imports.
import matplotlib # NOQA
from matplotlib import __version__ as matplotlib_version
- from matplotlib import cm # NOQA
from matplotlib import pyplot as plt # NOQA
from matplotlib.axes._axes import Axes # NOQA
from matplotlib.collections import LineCollection # NOQA
diff --git a/optuna/visualization/matplotlib/_param_importances.py b/optuna/visualization/matplotlib/_param_importances.py
--- a/optuna/visualization/matplotlib/_param_importances.py
+++ b/optuna/visualization/matplotlib/_param_importances.py
@@ -16,7 +16,6 @@
if _imports.is_successful():
from optuna.visualization.matplotlib._matplotlib_imports import Axes
- from optuna.visualization.matplotlib._matplotlib_imports import cm
from optuna.visualization.matplotlib._matplotlib_imports import plt
@@ -115,7 +114,7 @@
pos,
importance_values,
align="center",
- color=cm.get_cmap("tab20c")(0),
+ color=plt.get_cmap("tab20c")(0),
tick_label=param_names,
)
|
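The switch from `cm.get_cmap` to `plt.get_cmap` is what removes the warning here: recent Matplotlib releases deprecate `matplotlib.cm.get_cmap`, while the pyplot accessor (and the `matplotlib.colormaps` registry on newer versions) stays warning-free. A minimal sketch, assuming Matplotlib is installed:

```python
import matplotlib.pyplot as plt

# Warning-free accessor used by the patch above.
color = plt.get_cmap("tab20c")(0)
# On newer Matplotlib this is also available as: matplotlib.colormaps["tab20c"](0)
print(color)  # RGBA tuple for the first color of tab20c
```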
{"golden_diff": "diff --git a/optuna/visualization/matplotlib/_matplotlib_imports.py b/optuna/visualization/matplotlib/_matplotlib_imports.py\n--- a/optuna/visualization/matplotlib/_matplotlib_imports.py\n+++ b/optuna/visualization/matplotlib/_matplotlib_imports.py\n@@ -7,7 +7,6 @@\n # TODO(ytknzw): Add specific imports.\n import matplotlib # NOQA\n from matplotlib import __version__ as matplotlib_version\n- from matplotlib import cm # NOQA\n from matplotlib import pyplot as plt # NOQA\n from matplotlib.axes._axes import Axes # NOQA\n from matplotlib.collections import LineCollection # NOQA\ndiff --git a/optuna/visualization/matplotlib/_param_importances.py b/optuna/visualization/matplotlib/_param_importances.py\n--- a/optuna/visualization/matplotlib/_param_importances.py\n+++ b/optuna/visualization/matplotlib/_param_importances.py\n@@ -16,7 +16,6 @@\n \n if _imports.is_successful():\n from optuna.visualization.matplotlib._matplotlib_imports import Axes\n- from optuna.visualization.matplotlib._matplotlib_imports import cm\n from optuna.visualization.matplotlib._matplotlib_imports import plt\n \n \n@@ -115,7 +114,7 @@\n pos,\n importance_values,\n align=\"center\",\n- color=cm.get_cmap(\"tab20c\")(0),\n+ color=plt.get_cmap(\"tab20c\")(0),\n tick_label=param_names,\n )\n", "issue": "Reduce warnings in unit tests\n### Motivation\n\nThe Optuna's unit tests raise many warnings, which make it hard to recognize unexpected warning messages.\n\n### Description\n\nWe can filter expected warnings using `@pytest.mark.filterwarnings` or `warnings.simplefilter(\"ignore\")`. Some warnings may be unexpected or unsuitable. We should fix tests or Optuna's codes in such cases.\r\n\r\nThis issue is contribution-welcome. We welcome any fixes, even how trivial.\n\n### Alternatives (optional)\n\n_No response_\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "from typing import Callable\nfrom typing import List\nfrom typing import Optional\n\nimport numpy as np\n\nfrom optuna._experimental import experimental_func\nfrom optuna.importance._base import BaseImportanceEvaluator\nfrom optuna.logging import get_logger\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.visualization._param_importances import _get_importances_info\nfrom optuna.visualization._param_importances import _ImportancesInfo\nfrom optuna.visualization.matplotlib._matplotlib_imports import _imports\n\n\nif _imports.is_successful():\n from optuna.visualization.matplotlib._matplotlib_imports import Axes\n from optuna.visualization.matplotlib._matplotlib_imports import cm\n from optuna.visualization.matplotlib._matplotlib_imports import plt\n\n\n_logger = get_logger(__name__)\n\n\nAXES_PADDING_RATIO = 1.05\n\n\n@experimental_func(\"2.2.0\")\ndef plot_param_importances(\n study: Study,\n evaluator: Optional[BaseImportanceEvaluator] = None,\n params: Optional[List[str]] = None,\n *,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"Axes\":\n \"\"\"Plot hyperparameter importances with Matplotlib.\n\n .. seealso::\n Please refer to :func:`optuna.visualization.plot_param_importances` for an example.\n\n Example:\n\n The following code snippet shows how to plot hyperparameter importances.\n\n .. 
plot::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_int(\"x\", 0, 2)\n y = trial.suggest_float(\"y\", -1.0, 1.0)\n z = trial.suggest_float(\"z\", 0.0, 1.5)\n return x ** 2 + y ** 3 - z ** 4\n\n\n sampler = optuna.samplers.RandomSampler(seed=10)\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=100)\n\n optuna.visualization.matplotlib.plot_param_importances(study)\n\n Args:\n study:\n An optimized study.\n evaluator:\n An importance evaluator object that specifies which algorithm to base the importance\n assessment on.\n Defaults to\n :class:`~optuna.importance.FanovaImportanceEvaluator`.\n params:\n A list of names of parameters to assess.\n If :obj:`None`, all parameters that are present in all of the completed trials are\n assessed.\n target:\n A function to specify the value to display. If it is :obj:`None` and ``study`` is being\n used for single-objective optimization, the objective values are plotted.\n\n .. note::\n Specify this argument if ``study`` is being used for multi-objective\n optimization. For example, to get the hyperparameter importance of the first\n objective, use ``target=lambda t: t.values[0]`` for the target parameter.\n target_name:\n Target's name to display on the axis label.\n\n Returns:\n A :class:`matplotlib.axes.Axes` object.\n \"\"\"\n\n _imports.check()\n\n importances_info = _get_importances_info(study, evaluator, params, target, target_name)\n return _get_importances_plot(importances_info)\n\n\ndef _get_importances_plot(info: _ImportancesInfo) -> \"Axes\":\n # Set up the graph style.\n plt.style.use(\"ggplot\") # Use ggplot style sheet for similar outputs to plotly.\n fig, ax = plt.subplots()\n ax.set_title(\"Hyperparameter Importances\")\n ax.set_xlabel(f\"Importance for {info.target_name}\")\n ax.set_ylabel(\"Hyperparameter\")\n\n param_names = info.param_names\n pos = np.arange(len(param_names))\n importance_values = info.importance_values\n\n if len(importance_values) == 0:\n return ax\n\n # Draw horizontal bars.\n ax.barh(\n pos,\n importance_values,\n align=\"center\",\n color=cm.get_cmap(\"tab20c\")(0),\n tick_label=param_names,\n )\n\n renderer = fig.canvas.get_renderer()\n for idx, (val, label) in enumerate(zip(importance_values, info.importance_labels)):\n text = ax.text(val, idx, label, va=\"center\")\n\n # Sometimes horizontal axis needs to be re-scaled\n # to avoid text going over plot area.\n bbox = text.get_window_extent(renderer)\n bbox = bbox.transformed(ax.transData.inverted())\n _, plot_xmax = ax.get_xlim()\n bbox_xmax = bbox.xmax\n\n if bbox_xmax > plot_xmax:\n ax.set_xlim(xmax=AXES_PADDING_RATIO * bbox_xmax)\n\n return ax\n", "path": "optuna/visualization/matplotlib/_param_importances.py"}, {"content": "from packaging import version\n\nfrom optuna._imports import try_import\n\n\nwith try_import() as _imports: # NOQA\n # TODO(ytknzw): Add specific imports.\n import matplotlib # NOQA\n from matplotlib import __version__ as matplotlib_version\n from matplotlib import cm # NOQA\n from matplotlib import pyplot as plt # NOQA\n from matplotlib.axes._axes import Axes # NOQA\n from matplotlib.collections import LineCollection # NOQA\n from matplotlib.collections import PathCollection # NOQA\n from matplotlib.colors import Colormap # NOQA\n from matplotlib.contour import ContourSet # NOQA\n from matplotlib.patches import Rectangle # NOQA\n\n # TODO(ytknzw): Set precise version.\n if version.parse(matplotlib_version) < version.parse(\"3.0.0\"):\n raise ImportError(\n \"Your version of 
Matplotlib is \" + matplotlib_version + \" . \"\n \"Please install Matplotlib version 3.0.0 or higher. \"\n \"Matplotlib can be installed by executing `$ pip install -U matplotlib>=3.0.0`. \"\n \"For further information, please refer to the installation guide of Matplotlib. \",\n name=\"matplotlib\",\n )\n", "path": "optuna/visualization/matplotlib/_matplotlib_imports.py"}]}
num_tokens_prompt: 2,353 | num_tokens_diff: 330

problem_id: gh_patches_debug_28053 | source: rasdani/github-patches | task_type: git_diff | in_source_id: numba__numba-6281

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reminder to remove the `numba.jitclass` shim
This is a reminder to remove the `numba.jitclass` shim for 0.5x, at the moment x=1. Perhaps x should be 2, so that users have a bit more time. xref schedule: http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#id7
</issue>
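For code still using the shim, the migration the deprecation schedule points at is a one-line import change. A minimal sketch, assuming a Numba release recent enough to ship `numba.experimental.jitclass`:

```python
from numba import int64
from numba.experimental import jitclass   # replaces the deprecated numba.jitclass

spec = [("value", int64)]


@jitclass(spec)
class Counter:
    def __init__(self):
        self.value = 0

    def increment(self):
        self.value += 1


c = Counter()
c.increment()
print(c.value)  # 1
```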
<code>
[start of numba/experimental/jitclass/decorators.py]
1 import warnings
2
3 from numba.core import types, config, errors
4 from numba.experimental.jitclass.base import register_class_type, ClassBuilder
5
6
7 def jitclass(spec):
8 """
9 A decorator for creating a jitclass.
10
11 **arguments**:
12
13 - spec:
14 Specifies the types of each field on this class.
15 Must be a dictionary or a sequence.
16 With a dictionary, use collections.OrderedDict for stable ordering.
17 With a sequence, it must contain 2-tuples of (fieldname, fieldtype).
18
19 **returns**:
20
21 A callable that takes a class object, which will be compiled.
22 """
23
24 def wrap(cls):
25 if config.DISABLE_JIT:
26 return cls
27 else:
28 return register_class_type(cls, spec, types.ClassType, ClassBuilder)
29
30 return wrap
31
32
33 def _warning_jitclass(spec):
34 """
35 Re-export of numba.experimental.jitclass with a warning.
36 To be used in numba/__init__.py.
37 This endpoint is deprecated.
38 """
39 url = ("https://numba.pydata.org/numba-doc/latest/reference/"
40 "deprecation.html#change-of-jitclass-location")
41
42 msg = ("The 'numba.jitclass' decorator has moved to "
43 "'numba.experimental.jitclass' to better reflect the experimental "
44 "nature of the functionality. Please update your imports to "
45 "accommodate this change and see {} for the time frame.".format(url))
46
47 warnings.warn(msg, category=errors.NumbaDeprecationWarning,
48 stacklevel=2)
49
50 return jitclass(spec)
51
[end of numba/experimental/jitclass/decorators.py]
[start of numba/__init__.py]
1 """
2 Expose top-level symbols that are safe for import *
3 """
4
5 import platform
6 import re
7 import sys
8 import warnings
9
10 from ._version import get_versions
11 from numba.misc.init_utils import generate_version_info
12
13 __version__ = get_versions()['version']
14 version_info = generate_version_info(__version__)
15 del get_versions
16 del generate_version_info
17
18
19 from numba.core import config
20 from numba.testing import _runtests as runtests
21 from numba.core import types, errors
22
23 # Re-export typeof
24 from numba.misc.special import (
25 typeof, prange, pndindex, gdb, gdb_breakpoint, gdb_init,
26 literally, literal_unroll
27 )
28
29 # Re-export error classes
30 from numba.core.errors import *
31
32 # Re-export types itself
33 import numba.core.types as types
34
35 # Re-export all type names
36 from numba.core.types import *
37
38 # Re-export decorators
39 from numba.core.decorators import (cfunc, generated_jit, jit, njit, stencil,
40 jit_module)
41
42 # Re-export vectorize decorators and the thread layer querying function
43 from numba.np.ufunc import (vectorize, guvectorize, threading_layer,
44 get_num_threads, set_num_threads)
45
46 # Re-export Numpy helpers
47 from numba.np.numpy_support import carray, farray, from_dtype
48
49 # Re-export experimental
50 from numba import experimental
51
52 # Re-export experimental.jitclass as jitclass, this is deprecated
53 from numba.experimental.jitclass.decorators import _warning_jitclass as jitclass
54
55 # Initialize withcontexts
56 import numba.core.withcontexts
57 from numba.core.withcontexts import objmode_context as objmode
58
59 # Keep this for backward compatibility.
60 test = runtests.main
61
62
63 __all__ = """
64 cfunc
65 from_dtype
66 guvectorize
67 jit
68 experimental
69 njit
70 stencil
71 jit_module
72 jitclass
73 typeof
74 prange
75 gdb
76 gdb_breakpoint
77 gdb_init
78 vectorize
79 objmode
80 literal_unroll
81 get_num_threads
82 set_num_threads
83 """.split() + types.__all__ + errors.__all__
84
85
86 _min_llvmlite_version = (0, 33, 0)
87 _min_llvm_version = (9, 0, 0)
88
89 def _ensure_llvm():
90 """
91 Make sure llvmlite is operational.
92 """
93 import warnings
94 import llvmlite
95
96 # Only look at the the major, minor and bugfix version numbers.
97 # Ignore other stuffs
98 regex = re.compile(r'(\d+)\.(\d+).(\d+)')
99 m = regex.match(llvmlite.__version__)
100 if m:
101 ver = tuple(map(int, m.groups()))
102 if ver < _min_llvmlite_version:
103 msg = ("Numba requires at least version %d.%d.%d of llvmlite.\n"
104 "Installed version is %s.\n"
105 "Please update llvmlite." %
106 (_min_llvmlite_version + (llvmlite.__version__,)))
107 raise ImportError(msg)
108 else:
109 # Not matching?
110 warnings.warn("llvmlite version format not recognized!")
111
112 from llvmlite.binding import llvm_version_info, check_jit_execution
113
114 if llvm_version_info < _min_llvm_version:
115 msg = ("Numba requires at least version %d.%d.%d of LLVM.\n"
116 "Installed llvmlite is built against version %d.%d.%d.\n"
117 "Please update llvmlite." %
118 (_min_llvm_version + llvm_version_info))
119 raise ImportError(msg)
120
121 check_jit_execution()
122
123 def _ensure_critical_deps():
124 """
125 Make sure Python, NumPy and SciPy have supported versions.
126 """
127 from numba.np.numpy_support import numpy_version
128 from numba.core.utils import PYVERSION
129
130 if PYVERSION < (3, 6):
131 raise ImportError("Numba needs Python 3.6 or greater")
132
133 if numpy_version < (1, 15):
134 raise ImportError("Numba needs NumPy 1.15 or greater")
135
136 try:
137 import scipy
138 except ImportError:
139 pass
140 else:
141 sp_version = tuple(map(int, scipy.__version__.split('.')[:2]))
142 if sp_version < (1, 0):
143 raise ImportError("Numba requires SciPy version 1.0 or greater")
144
145
146 def _try_enable_svml():
147 """
148 Tries to enable SVML if configuration permits use and the library is found.
149 """
150 if not config.DISABLE_INTEL_SVML:
151 try:
152 if sys.platform.startswith('linux'):
153 llvmlite.binding.load_library_permanently("libsvml.so")
154 elif sys.platform.startswith('darwin'):
155 llvmlite.binding.load_library_permanently("libsvml.dylib")
156 elif sys.platform.startswith('win'):
157 llvmlite.binding.load_library_permanently("svml_dispmd")
158 else:
159 return False
160 # The SVML library is loaded, therefore SVML *could* be supported.
161 # Now see if LLVM has been compiled with the SVML support patch.
162 # If llvmlite has the checking function `has_svml` and it returns
163 # True, then LLVM was compiled with SVML support and the the setup
164 # for SVML can proceed. We err on the side of caution and if the
165 # checking function is missing, regardless of that being fine for
166 # most 0.23.{0,1} llvmlite instances (i.e. conda or pip installed),
167 # we assume that SVML was not compiled in. llvmlite 0.23.2 is a
168 # bugfix release with the checking function present that will always
169 # produce correct behaviour. For context see: #3006.
170 try:
171 if not getattr(llvmlite.binding.targets, "has_svml")():
172 # has detection function, but no svml compiled in, therefore
173 # disable SVML
174 return False
175 except AttributeError:
176 if platform.machine() == 'x86_64' and config.DEBUG:
177 msg = ("SVML was found but llvmlite >= 0.23.2 is "
178 "needed to support it.")
179 warnings.warn(msg)
180 # does not have detection function, cannot detect reliably,
181 # disable SVML.
182 return False
183
184 # All is well, detection function present and reports SVML is
185 # compiled in, set the vector library to SVML.
186 llvmlite.binding.set_option('SVML', '-vector-library=SVML')
187 return True
188 except:
189 if platform.machine() == 'x86_64' and config.DEBUG:
190 warnings.warn("SVML was not found/could not be loaded.")
191 return False
192
193 _ensure_llvm()
194 _ensure_critical_deps()
195
196 # we know llvmlite is working as the above tests passed, import it now as SVML
197 # needs to mutate runtime options (sets the `-vector-library`).
198 import llvmlite
199
200 """
201 Is set to True if Intel SVML is in use.
202 """
203 config.USING_SVML = _try_enable_svml()
204
205
206 # ---------------------- WARNING WARNING WARNING ----------------------------
207 # The following imports occur below here (SVML init) because somewhere in their
208 # import sequence they have a `@njit` wrapped function. This triggers too early
209 # a bind to the underlying LLVM libraries which then irretrievably sets the LLVM
210 # SVML state to "no SVML". See https://github.com/numba/numba/issues/4689 for
211 # context.
212 # ---------------------- WARNING WARNING WARNING ----------------------------
213
214 # Initialize typed containers
215 import numba.typed
216
[end of numba/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numba/__init__.py b/numba/__init__.py
--- a/numba/__init__.py
+++ b/numba/__init__.py
@@ -49,9 +49,6 @@
# Re-export experimental
from numba import experimental
-# Re-export experimental.jitclass as jitclass, this is deprecated
-from numba.experimental.jitclass.decorators import _warning_jitclass as jitclass
-
# Initialize withcontexts
import numba.core.withcontexts
from numba.core.withcontexts import objmode_context as objmode
diff --git a/numba/experimental/jitclass/decorators.py b/numba/experimental/jitclass/decorators.py
--- a/numba/experimental/jitclass/decorators.py
+++ b/numba/experimental/jitclass/decorators.py
@@ -1,6 +1,4 @@
-import warnings
-
-from numba.core import types, config, errors
+from numba.core import types, config
from numba.experimental.jitclass.base import register_class_type, ClassBuilder
@@ -28,23 +26,3 @@
return register_class_type(cls, spec, types.ClassType, ClassBuilder)
return wrap
-
-
-def _warning_jitclass(spec):
- """
- Re-export of numba.experimental.jitclass with a warning.
- To be used in numba/__init__.py.
- This endpoint is deprecated.
- """
- url = ("https://numba.pydata.org/numba-doc/latest/reference/"
- "deprecation.html#change-of-jitclass-location")
-
- msg = ("The 'numba.jitclass' decorator has moved to "
- "'numba.experimental.jitclass' to better reflect the experimental "
- "nature of the functionality. Please update your imports to "
- "accommodate this change and see {} for the time frame.".format(url))
-
- warnings.warn(msg, category=errors.NumbaDeprecationWarning,
- stacklevel=2)
-
- return jitclass(spec)
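For downstream code, the practical effect of this patch is that the deprecated top-level import disappears and `jitclass` must come from `numba.experimental`. A minimal migration sketch (hypothetical example, not taken from the repository):

```python
# Hypothetical migration example -- the class and spec are illustrative.
# Old, removed by the patch above:
#   from numba import jitclass
# New import location:
from numba import int32, float64
from numba.experimental import jitclass

spec = [('count', int32), ('value', float64)]

@jitclass(spec)
class Accumulator(object):
    def __init__(self):
        self.count = 0
        self.value = 0.0

    def add(self, x):
        self.count += 1
        self.value += x
```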
|
{"golden_diff": "diff --git a/numba/__init__.py b/numba/__init__.py\n--- a/numba/__init__.py\n+++ b/numba/__init__.py\n@@ -49,9 +49,6 @@\n # Re-export experimental\n from numba import experimental\n \n-# Re-export experimental.jitclass as jitclass, this is deprecated\n-from numba.experimental.jitclass.decorators import _warning_jitclass as jitclass\n-\n # Initialize withcontexts\n import numba.core.withcontexts\n from numba.core.withcontexts import objmode_context as objmode\ndiff --git a/numba/experimental/jitclass/decorators.py b/numba/experimental/jitclass/decorators.py\n--- a/numba/experimental/jitclass/decorators.py\n+++ b/numba/experimental/jitclass/decorators.py\n@@ -1,6 +1,4 @@\n-import warnings\n-\n-from numba.core import types, config, errors\n+from numba.core import types, config\n from numba.experimental.jitclass.base import register_class_type, ClassBuilder\n \n \n@@ -28,23 +26,3 @@\n return register_class_type(cls, spec, types.ClassType, ClassBuilder)\n \n return wrap\n-\n-\n-def _warning_jitclass(spec):\n- \"\"\"\n- Re-export of numba.experimental.jitclass with a warning.\n- To be used in numba/__init__.py.\n- This endpoint is deprecated.\n- \"\"\"\n- url = (\"https://numba.pydata.org/numba-doc/latest/reference/\"\n- \"deprecation.html#change-of-jitclass-location\")\n-\n- msg = (\"The 'numba.jitclass' decorator has moved to \"\n- \"'numba.experimental.jitclass' to better reflect the experimental \"\n- \"nature of the functionality. Please update your imports to \"\n- \"accommodate this change and see {} for the time frame.\".format(url))\n-\n- warnings.warn(msg, category=errors.NumbaDeprecationWarning,\n- stacklevel=2)\n-\n- return jitclass(spec)\n", "issue": "Reminder to remove the `numba.jitclass` shim\nThis is a reminder to remove the `numba.jitclass` shim for 0.5x, at the moment x=1. Perhaps x should be 2, so that users have a bit more time. xref schedule: http://numba.pydata.org/numba-doc/latest/reference/deprecation.html#id7\n", "before_files": [{"content": "import warnings\n\nfrom numba.core import types, config, errors\nfrom numba.experimental.jitclass.base import register_class_type, ClassBuilder\n\n\ndef jitclass(spec):\n \"\"\"\n A decorator for creating a jitclass.\n\n **arguments**:\n\n - spec:\n Specifies the types of each field on this class.\n Must be a dictionary or a sequence.\n With a dictionary, use collections.OrderedDict for stable ordering.\n With a sequence, it must contain 2-tuples of (fieldname, fieldtype).\n\n **returns**:\n\n A callable that takes a class object, which will be compiled.\n \"\"\"\n\n def wrap(cls):\n if config.DISABLE_JIT:\n return cls\n else:\n return register_class_type(cls, spec, types.ClassType, ClassBuilder)\n\n return wrap\n\n\ndef _warning_jitclass(spec):\n \"\"\"\n Re-export of numba.experimental.jitclass with a warning.\n To be used in numba/__init__.py.\n This endpoint is deprecated.\n \"\"\"\n url = (\"https://numba.pydata.org/numba-doc/latest/reference/\"\n \"deprecation.html#change-of-jitclass-location\")\n\n msg = (\"The 'numba.jitclass' decorator has moved to \"\n \"'numba.experimental.jitclass' to better reflect the experimental \"\n \"nature of the functionality. 
Please update your imports to \"\n \"accommodate this change and see {} for the time frame.\".format(url))\n\n warnings.warn(msg, category=errors.NumbaDeprecationWarning,\n stacklevel=2)\n\n return jitclass(spec)\n", "path": "numba/experimental/jitclass/decorators.py"}, {"content": "\"\"\"\nExpose top-level symbols that are safe for import *\n\"\"\"\n\nimport platform\nimport re\nimport sys\nimport warnings\n\nfrom ._version import get_versions\nfrom numba.misc.init_utils import generate_version_info\n\n__version__ = get_versions()['version']\nversion_info = generate_version_info(__version__)\ndel get_versions\ndel generate_version_info\n\n\nfrom numba.core import config\nfrom numba.testing import _runtests as runtests\nfrom numba.core import types, errors\n\n# Re-export typeof\nfrom numba.misc.special import (\n typeof, prange, pndindex, gdb, gdb_breakpoint, gdb_init,\n literally, literal_unroll\n)\n\n# Re-export error classes\nfrom numba.core.errors import *\n\n# Re-export types itself\nimport numba.core.types as types\n\n# Re-export all type names\nfrom numba.core.types import *\n\n# Re-export decorators\nfrom numba.core.decorators import (cfunc, generated_jit, jit, njit, stencil,\n jit_module)\n\n# Re-export vectorize decorators and the thread layer querying function\nfrom numba.np.ufunc import (vectorize, guvectorize, threading_layer,\n get_num_threads, set_num_threads)\n\n# Re-export Numpy helpers\nfrom numba.np.numpy_support import carray, farray, from_dtype\n\n# Re-export experimental\nfrom numba import experimental\n\n# Re-export experimental.jitclass as jitclass, this is deprecated\nfrom numba.experimental.jitclass.decorators import _warning_jitclass as jitclass\n\n# Initialize withcontexts\nimport numba.core.withcontexts\nfrom numba.core.withcontexts import objmode_context as objmode\n\n# Keep this for backward compatibility.\ntest = runtests.main\n\n\n__all__ = \"\"\"\n cfunc\n from_dtype\n guvectorize\n jit\n experimental\n njit\n stencil\n jit_module\n jitclass\n typeof\n prange\n gdb\n gdb_breakpoint\n gdb_init\n vectorize\n objmode\n literal_unroll\n get_num_threads\n set_num_threads\n \"\"\".split() + types.__all__ + errors.__all__\n\n\n_min_llvmlite_version = (0, 33, 0)\n_min_llvm_version = (9, 0, 0)\n\ndef _ensure_llvm():\n \"\"\"\n Make sure llvmlite is operational.\n \"\"\"\n import warnings\n import llvmlite\n\n # Only look at the the major, minor and bugfix version numbers.\n # Ignore other stuffs\n regex = re.compile(r'(\\d+)\\.(\\d+).(\\d+)')\n m = regex.match(llvmlite.__version__)\n if m:\n ver = tuple(map(int, m.groups()))\n if ver < _min_llvmlite_version:\n msg = (\"Numba requires at least version %d.%d.%d of llvmlite.\\n\"\n \"Installed version is %s.\\n\"\n \"Please update llvmlite.\" %\n (_min_llvmlite_version + (llvmlite.__version__,)))\n raise ImportError(msg)\n else:\n # Not matching?\n warnings.warn(\"llvmlite version format not recognized!\")\n\n from llvmlite.binding import llvm_version_info, check_jit_execution\n\n if llvm_version_info < _min_llvm_version:\n msg = (\"Numba requires at least version %d.%d.%d of LLVM.\\n\"\n \"Installed llvmlite is built against version %d.%d.%d.\\n\"\n \"Please update llvmlite.\" %\n (_min_llvm_version + llvm_version_info))\n raise ImportError(msg)\n\n check_jit_execution()\n\ndef _ensure_critical_deps():\n \"\"\"\n Make sure Python, NumPy and SciPy have supported versions.\n \"\"\"\n from numba.np.numpy_support import numpy_version\n from numba.core.utils import PYVERSION\n\n if PYVERSION < (3, 6):\n raise 
ImportError(\"Numba needs Python 3.6 or greater\")\n\n if numpy_version < (1, 15):\n raise ImportError(\"Numba needs NumPy 1.15 or greater\")\n\n try:\n import scipy\n except ImportError:\n pass\n else:\n sp_version = tuple(map(int, scipy.__version__.split('.')[:2]))\n if sp_version < (1, 0):\n raise ImportError(\"Numba requires SciPy version 1.0 or greater\")\n\n\ndef _try_enable_svml():\n \"\"\"\n Tries to enable SVML if configuration permits use and the library is found.\n \"\"\"\n if not config.DISABLE_INTEL_SVML:\n try:\n if sys.platform.startswith('linux'):\n llvmlite.binding.load_library_permanently(\"libsvml.so\")\n elif sys.platform.startswith('darwin'):\n llvmlite.binding.load_library_permanently(\"libsvml.dylib\")\n elif sys.platform.startswith('win'):\n llvmlite.binding.load_library_permanently(\"svml_dispmd\")\n else:\n return False\n # The SVML library is loaded, therefore SVML *could* be supported.\n # Now see if LLVM has been compiled with the SVML support patch.\n # If llvmlite has the checking function `has_svml` and it returns\n # True, then LLVM was compiled with SVML support and the the setup\n # for SVML can proceed. We err on the side of caution and if the\n # checking function is missing, regardless of that being fine for\n # most 0.23.{0,1} llvmlite instances (i.e. conda or pip installed),\n # we assume that SVML was not compiled in. llvmlite 0.23.2 is a\n # bugfix release with the checking function present that will always\n # produce correct behaviour. For context see: #3006.\n try:\n if not getattr(llvmlite.binding.targets, \"has_svml\")():\n # has detection function, but no svml compiled in, therefore\n # disable SVML\n return False\n except AttributeError:\n if platform.machine() == 'x86_64' and config.DEBUG:\n msg = (\"SVML was found but llvmlite >= 0.23.2 is \"\n \"needed to support it.\")\n warnings.warn(msg)\n # does not have detection function, cannot detect reliably,\n # disable SVML.\n return False\n\n # All is well, detection function present and reports SVML is\n # compiled in, set the vector library to SVML.\n llvmlite.binding.set_option('SVML', '-vector-library=SVML')\n return True\n except:\n if platform.machine() == 'x86_64' and config.DEBUG:\n warnings.warn(\"SVML was not found/could not be loaded.\")\n return False\n\n_ensure_llvm()\n_ensure_critical_deps()\n\n# we know llvmlite is working as the above tests passed, import it now as SVML\n# needs to mutate runtime options (sets the `-vector-library`).\nimport llvmlite\n\n\"\"\"\nIs set to True if Intel SVML is in use.\n\"\"\"\nconfig.USING_SVML = _try_enable_svml()\n\n\n# ---------------------- WARNING WARNING WARNING ----------------------------\n# The following imports occur below here (SVML init) because somewhere in their\n# import sequence they have a `@njit` wrapped function. This triggers too early\n# a bind to the underlying LLVM libraries which then irretrievably sets the LLVM\n# SVML state to \"no SVML\". See https://github.com/numba/numba/issues/4689 for\n# context.\n# ---------------------- WARNING WARNING WARNING ----------------------------\n\n# Initialize typed containers\nimport numba.typed\n", "path": "numba/__init__.py"}]}
| 3,332 | 454 |
gh_patches_debug_5329
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-3928
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Broken logo in docs
### Steps to reproduce the problem
1. https://saleor.readthedocs.io/en/latest/gettingstarted.html
2.

**System information**
Operating system: OSX
Browser: Chrome
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # saleor documentation build configuration file, created by
4 # sphinx-quickstart on Tue Jan 5 11:54:57 2016.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import sys
16 import os
17 import shlex
18
19 # If extensions (or modules to document with autodoc) are in another directory,
20 # add these directories to sys.path here. If the directory is relative to the
21 # documentation root, use os.path.abspath to make it absolute, like shown here.
22 #sys.path.insert(0, os.path.abspath('.'))
23
24 # -- General configuration ------------------------------------------------
25
26 # If your documentation needs a minimal Sphinx version, state it here.
27 #needs_sphinx = '1.0'
28
29 # Add any Sphinx extension module names here, as strings. They can be
30 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 # ones.
32 extensions = [
33 'sphinx.ext.autodoc',
34 ]
35
36 # Add any paths that contain templates here, relative to this directory.
37 templates_path = ['_templates']
38
39 # The suffix(es) of source filenames.
40 # You can specify multiple suffix as a list of string:
41 # source_suffix = ['.rst', '.md']
42 source_suffix = '.rst'
43
44 # The encoding of source files.
45 #source_encoding = 'utf-8-sig'
46
47 # The master toctree document.
48 master_doc = 'index'
49
50 # General information about the project.
51 project = u'Saleor'
52 copyright = u'2015-2018, Mirumee Software'
53 author = u'Mirumee Software'
54
55 # The version info for the project you're documenting, acts as replacement for
56 # |version| and |release|, also used in various other places throughout the
57 # built documents.
58 #
59 # The short X.Y version.
60 version = u'2018.01'
61 # The full version, including alpha/beta/rc tags.
62 release = u'2018.01'
63
64 # The language for content autogenerated by Sphinx. Refer to documentation
65 # for a list of supported languages.
66 #
67 # This is also used if you do content translation via gettext catalogs.
68 # Usually you set "language" from the command line for these cases.
69 language = None
70
71 # There are two options for replacing |today|: either, you set today to some
72 # non-false value, then it is used:
73 #today = ''
74 # Else, today_fmt is used as the format for a strftime call.
75 #today_fmt = '%B %d, %Y'
76
77 # List of patterns, relative to source directory, that match files and
78 # directories to ignore when looking for source files.
79 exclude_patterns = ['_build']
80
81 # The reST default role (used for this markup: `text`) to use for all
82 # documents.
83 #default_role = None
84
85 # If true, '()' will be appended to :func: etc. cross-reference text.
86 #add_function_parentheses = True
87
88 # If true, the current module name will be prepended to all description
89 # unit titles (such as .. function::).
90 #add_module_names = True
91
92 # If true, sectionauthor and moduleauthor directives will be shown in the
93 # output. They are ignored by default.
94 #show_authors = False
95
96 # The name of the Pygments (syntax highlighting) style to use.
97 pygments_style = 'sphinx'
98
99 # A list of ignored prefixes for module index sorting.
100 #modindex_common_prefix = []
101
102 # If true, keep warnings as "system message" paragraphs in the built documents.
103 #keep_warnings = False
104
105 # If true, `todo` and `todoList` produce output, else they produce nothing.
106 todo_include_todos = False
107
108
109 # -- Options for HTML output ----------------------------------------------
110
111 # The theme to use for HTML and HTML Help pages. See the documentation for
112 # a list of builtin themes.
113 html_theme = 'sphinx_rtd_theme'
114
115 # Theme options are theme-specific and customize the look and feel of a theme
116 # further. For a list of options available for each theme, see the
117 # documentation.
118 #html_theme_options = {}
119
120 # Add any paths that contain custom themes here, relative to this directory.
121 #html_theme_path = []
122
123 # The name for this set of Sphinx documents. If None, it defaults to
124 # "<project> v<release> documentation".
125 html_title = 'Saleor documentation'
126
127 # A shorter title for the navigation bar. Default is the same as html_title.
128 #html_short_title = None
129
130 # The name of an image file (relative to this directory) to place at the top
131 # of the sidebar.
132 html_logo = 'logo-white.svg'
133
134 # The name of an image file (within the static path) to use as favicon of the
135 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
136 # pixels large.
137 #html_favicon = None
138
139 # Add any paths that contain custom static files (such as style sheets) here,
140 # relative to this directory. They are copied after the builtin static files,
141 # so a file named "default.css" will overwrite the builtin "default.css".
142 html_static_path = ['_static']
143
144 # Add any extra paths that contain custom files (such as robots.txt or
145 # .htaccess) here, relative to this directory. These files are copied
146 # directly to the root of the documentation.
147 #html_extra_path = []
148
149 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
150 # using the given strftime format.
151 #html_last_updated_fmt = '%b %d, %Y'
152
153 # If true, SmartyPants will be used to convert quotes and dashes to
154 # typographically correct entities.
155 #html_use_smartypants = True
156
157 # Custom sidebar templates, maps document names to template names.
158 #html_sidebars = {}
159
160 # Additional templates that should be rendered to pages, maps page names to
161 # template names.
162 #html_additional_pages = {}
163
164 # If false, no module index is generated.
165 #html_domain_indices = True
166
167 # If false, no index is generated.
168 #html_use_index = True
169
170 # If true, the index is split into individual pages for each letter.
171 #html_split_index = False
172
173 # If true, links to the reST sources are added to the pages.
174 #html_show_sourcelink = True
175
176 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
177 #html_show_sphinx = True
178
179 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
180 #html_show_copyright = True
181
182 # If true, an OpenSearch description file will be output, and all pages will
183 # contain a <link> tag referring to it. The value of this option must be the
184 # base URL from which the finished HTML is served.
185 #html_use_opensearch = ''
186
187 # This is the file name suffix for HTML files (e.g. ".xhtml").
188 #html_file_suffix = None
189
190 # Language to be used for generating the HTML full-text search index.
191 # Sphinx supports the following languages:
192 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
193 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
194 #html_search_language = 'en'
195
196 # A dictionary with options for the search language support, empty by default.
197 # Now only 'ja' uses this config value
198 #html_search_options = {'type': 'default'}
199
200 # The name of a javascript file (relative to the configuration directory) that
201 # implements a search results scorer. If empty, the default will be used.
202 #html_search_scorer = 'scorer.js'
203
204 # Output file base name for HTML help builder.
205 htmlhelp_basename = 'saleordoc'
206
207 # -- Options for LaTeX output ---------------------------------------------
208
209 latex_elements = {
210 # The paper size ('letterpaper' or 'a4paper').
211 #'papersize': 'letterpaper',
212
213 # The font size ('10pt', '11pt' or '12pt').
214 #'pointsize': '10pt',
215
216 # Additional stuff for the LaTeX preamble.
217 #'preamble': '',
218
219 # Latex figure (float) alignment
220 #'figure_align': 'htbp',
221 }
222
223 # Grouping the document tree into LaTeX files. List of tuples
224 # (source start file, target name, title,
225 # author, documentclass [howto, manual, or own class]).
226 latex_documents = [
227 (master_doc, 'saleor.tex', u'saleor Documentation',
228 u'Mirumee Software', 'manual'),
229 ]
230
231 # The name of an image file (relative to this directory) to place at the top of
232 # the title page.
233 #latex_logo = None
234
235 # For "manual" documents, if this is true, then toplevel headings are parts,
236 # not chapters.
237 #latex_use_parts = False
238
239 # If true, show page references after internal links.
240 #latex_show_pagerefs = False
241
242 # If true, show URL addresses after external links.
243 #latex_show_urls = False
244
245 # Documents to append as an appendix to all manuals.
246 #latex_appendices = []
247
248 # If false, no module index is generated.
249 #latex_domain_indices = True
250
251
252 # -- Options for manual page output ---------------------------------------
253
254 # One entry per manual page. List of tuples
255 # (source start file, name, description, authors, manual section).
256 man_pages = [
257 (master_doc, 'saleor', u'saleor Documentation',
258 [author], 1)
259 ]
260
261 # If true, show URL addresses after external links.
262 #man_show_urls = False
263
264
265 # -- Options for Texinfo output -------------------------------------------
266
267 # Grouping the document tree into Texinfo files. List of tuples
268 # (source start file, target name, title, author,
269 # dir menu entry, description, category)
270 texinfo_documents = [
271 (master_doc, 'saleor', u'saleor Documentation',
272 author, 'saleor', 'One line description of project.',
273 'Miscellaneous'),
274 ]
275
276 # Documents to append as an appendix to all manuals.
277 #texinfo_appendices = []
278
279 # If false, no module index is generated.
280 #texinfo_domain_indices = True
281
282 # How to display URL addresses: 'footnote', 'no', or 'inline'.
283 #texinfo_show_urls = 'footnote'
284
285 # If true, do not generate a @detailmenu in the "Top" node's menu.
286 #texinfo_no_detailmenu = False
287
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -129,7 +129,7 @@
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-html_logo = 'logo-white.svg'
+html_logo = 'logo.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -129,7 +129,7 @@\n \n # The name of an image file (relative to this directory) to place at the top\n # of the sidebar.\n-html_logo = 'logo-white.svg'\n+html_logo = 'logo.svg'\n \n # The name of an image file (within the static path) to use as favicon of the\n # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n", "issue": "Broken logo in docs\n### Steps to reproduce the problem\r\n1. https://saleor.readthedocs.io/en/latest/gettingstarted.html\r\n2. \r\n\r\n\r\n**System information**\r\nOperating system: OSX\r\nBrowser: Chrome\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# saleor documentation build configuration file, created by\n# sphinx-quickstart on Tue Jan 5 11:54:57 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Saleor'\ncopyright = u'2015-2018, Mirumee Software'\nauthor = u'Mirumee Software'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'2018.01'\n# The full version, including alpha/beta/rc tags.\nrelease = u'2018.01'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = 'Saleor documentation'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = 'logo-white.svg'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. 
The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'saleordoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'saleor.tex', u'saleor Documentation',\n u'Mirumee Software', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'saleor', u'saleor Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'saleor', u'saleor Documentation',\n author, 'saleor', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}]}
| 3,761 | 130 |
gh_patches_debug_22430
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-40
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG][client] web session has a different signature with the local session
**Describe the bug**
Web has its own session, but its `run` method has a different signature from the local session, which leads to a bug where, when a user submits more than one tensor, only the first tensor is executed.
**To Reproduce**
```python
In [14]: from mars.session import new_session
In [15]: sess = new_session('http://0.0.0.0:49911')
In [16]: a = mt.ones((2, 2))
In [17]: sess.run(a, a+1)
Out[17]:
[array([[1., 1.],
[1., 1.]])]
```
</issue>
<code>
[start of mars/web/session.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import base64
18 import json
19 import time
20 import logging
21
22 import requests
23
24 from ..compat import six, TimeoutError
25 from ..serialize import dataserializer
26 from ..errors import ExecutionInterrupted
27
28 logger = logging.getLogger(__name__)
29
30
31 class Session(object):
32 def __init__(self, endpoint, args=None):
33 self._endpoint = endpoint
34 self._args = args
35 self._tensor_to_graph = dict()
36
37 self._req_session = requests.Session()
38
39 from requests.adapters import HTTPAdapter
40 self._req_session.mount('http://stackoverflow.com', HTTPAdapter(max_retries=5))
41 self._main()
42
43 @property
44 def session_id(self):
45 return self._session_id
46
47 def _main(self):
48 resp = self._req_session.post(self._endpoint + '/api/session', self._args)
49 if resp.status_code >= 400:
50 raise SystemError('Failed to create mars session.')
51 content = json.loads(resp.text)
52 self._session_id = content['session_id']
53
54 def run(self, tensors, compose=True, wait=True, timeout=-1):
55 from ..graph import DirectedGraph
56 graph = DirectedGraph()
57
58 if not isinstance(tensors, (list, tuple, set)):
59 tensors = [tensors]
60 for t in tensors:
61 graph = t.build_graph(graph=graph, tiled=False, compose=compose)
62 targets = [t.key for t in tensors]
63
64 targets_join = ','.join(targets)
65 session_url = self._endpoint + '/api/session/' + self._session_id
66 graph_json = graph.to_json()
67
68 resp_json = self._submit_graph(graph_json, targets_join)
69 graph_key = resp_json['graph_key']
70 graph_url = session_url + '/graph/' + graph_key
71
72 for t in tensors:
73 self._tensor_to_graph[t.key] = graph_key
74
75 if wait:
76 exec_start_time = time.time()
77 while timeout <= 0 or time.time() - exec_start_time <= timeout:
78 try:
79 time.sleep(1)
80 try:
81 resp = self._req_session.get(graph_url)
82 except requests.ConnectionError as ex:
83 err_msg = str(ex)
84 if 'ConnectionResetError' in err_msg or 'Connection refused' in err_msg:
85 continue
86 raise
87 if resp.status_code == 504:
88 logging.debug('Gateway Time-out, try again')
89 continue
90 if resp.status_code >= 400:
91 raise SystemError('Failed to read task status. Code: %d, Reason: %s, Content:\n%s' %
92 (resp.status_code, resp.reason, resp.text))
93 resp_json = json.loads(resp.text)
94 if resp_json['state'] in ('running', 'preparing'):
95 continue
96 elif resp_json['state'] == 'success':
97 break
98 elif resp_json['state'] == 'cancelled':
99 raise ExecutionInterrupted
100 elif resp_json['state'] == 'failed':
101 # TODO add traceback
102 if 'traceback' in resp_json:
103 traceback = resp_json['traceback']
104 if isinstance(traceback, list):
105 traceback = ''.join(str(s) for s in traceback)
106 raise SystemError('Graph execution failed.\nMessage: %s\nTraceback from server:\n%s' %
107 (resp_json['msg'], traceback))
108 else:
109 raise SystemError('Graph execution failed with unknown reason.')
110 else:
111 raise SystemError('Unknown graph execution state %s' % resp_json['state'])
112 except KeyboardInterrupt:
113 resp = self._req_session.delete(graph_url)
114 if resp.status_code >= 400:
115 raise SystemError('Failed to stop graph execution. Code: %d, Reason: %s, Content:\n%s' %
116 (resp.status_code, resp.reason, resp.text))
117 if 0 < timeout < time.time() - exec_start_time:
118 raise TimeoutError
119 data_list = []
120 for tk in targets:
121 resp = self._req_session.get(session_url + '/graph/' + graph_key + '/data/' + tk)
122 if resp.status_code >= 400:
123 continue
124 data_list.append(dataserializer.loads(resp.content))
125 return data_list
126 else:
127 return graph_key
128
129 def decref(self, *keys):
130 session_url = self._endpoint + '/api/session/' + self._session_id
131 for k in keys:
132 if k not in self._tensor_to_graph:
133 continue
134 data_url = session_url + '/graph/%s/data/%s' % (self._tensor_to_graph[k], k)
135 self._req_session.delete(data_url)
136
137 def stop(self, graph_key):
138 session_url = self._endpoint + '/api/session/' + self._session_id
139 graph_url = session_url + '/graph/' + graph_key
140 resp = self._req_session.delete(graph_url)
141 if resp.status_code >= 400:
142 raise SystemError('Failed to stop graph execution. Code: %d, Reason: %s, Content:\n%s' %
143 (resp.status_code, resp.reason, resp.text))
144
145 def _submit_graph(self, graph_json, targets):
146 session_url = self._endpoint + '/api/session/' + self._session_id
147 resp = self._req_session.post(session_url + '/graph', dict(
148 graph=json.dumps(graph_json),
149 target=targets,
150 ))
151 if resp.status_code >= 400:
152 resp_json = json.loads(resp.text)
153 exc_info = base64.b64decode(resp_json['exc_info'])
154 six.reraise(*exc_info)
155 resp_json = json.loads(resp.text)
156 return resp_json
157
158 def close(self):
159 self.decref(*list(self._tensor_to_graph.keys()))
160
161 resp = self._req_session.delete(self._endpoint + '/api/session/' + self._session_id)
162 if resp.status_code >= 400:
163 raise SystemError('Failed to close mars session.')
164
165 def check_service_ready(self, timeout=1):
166 try:
167 resp = self._req_session.get(self._endpoint + '/api', timeout=timeout)
168 except (requests.ConnectionError, requests.Timeout):
169 return False
170 if resp.status_code >= 400:
171 return False
172 return True
173
174 def count_workers(self):
175 resp = self._req_session.get(self._endpoint + '/api/worker', timeout=1)
176 return json.loads(resp.text)
177
178 def __enter__(self):
179 return self
180
181 def __exit__(self, exc_type, exc_val, exc_tb):
182 self.close()
183
[end of mars/web/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mars/web/session.py b/mars/web/session.py
--- a/mars/web/session.py
+++ b/mars/web/session.py
@@ -24,6 +24,7 @@
from ..compat import six, TimeoutError
from ..serialize import dataserializer
from ..errors import ExecutionInterrupted
+from ..graph import DirectedGraph
logger = logging.getLogger(__name__)
@@ -51,12 +52,14 @@
content = json.loads(resp.text)
self._session_id = content['session_id']
- def run(self, tensors, compose=True, wait=True, timeout=-1):
- from ..graph import DirectedGraph
- graph = DirectedGraph()
+ def run(self, *tensors, **kw):
+ timeout = kw.pop('timeout', -1)
+ compose = kw.pop('compose', True)
+ wait = kw.pop('wait', True)
+ if kw:
+ raise TypeError('run got unexpected key arguments {0}'.format(', '.join(kw.keys())))
- if not isinstance(tensors, (list, tuple, set)):
- tensors = [tensors]
+ graph = DirectedGraph()
for t in tensors:
graph = t.build_graph(graph=graph, tiled=False, compose=compose)
targets = [t.key for t in tensors]
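With the signature now matching the local session, the reproduction from the issue executes every tensor passed positionally. A usage sketch (assuming a Mars web scheduler is reachable at the endpoint shown in the issue):

```python
# Sketch only: the endpoint address is the one from the issue and assumes a
# running Mars web scheduler.
import mars.tensor as mt
from mars.session import new_session

sess = new_session('http://0.0.0.0:49911')
a = mt.ones((2, 2))
results = sess.run(a, a + 1)  # with the patch, both tensors are executed
```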
|
{"golden_diff": "diff --git a/mars/web/session.py b/mars/web/session.py\n--- a/mars/web/session.py\n+++ b/mars/web/session.py\n@@ -24,6 +24,7 @@\n from ..compat import six, TimeoutError\n from ..serialize import dataserializer\n from ..errors import ExecutionInterrupted\n+from ..graph import DirectedGraph\n \n logger = logging.getLogger(__name__)\n \n@@ -51,12 +52,14 @@\n content = json.loads(resp.text)\n self._session_id = content['session_id']\n \n- def run(self, tensors, compose=True, wait=True, timeout=-1):\n- from ..graph import DirectedGraph\n- graph = DirectedGraph()\n+ def run(self, *tensors, **kw):\n+ timeout = kw.pop('timeout', -1)\n+ compose = kw.pop('compose', True)\n+ wait = kw.pop('wait', True)\n+ if kw:\n+ raise TypeError('run got unexpected key arguments {0}'.format(', '.join(kw.keys())))\n \n- if not isinstance(tensors, (list, tuple, set)):\n- tensors = [tensors]\n+ graph = DirectedGraph()\n for t in tensors:\n graph = t.build_graph(graph=graph, tiled=False, compose=compose)\n targets = [t.key for t in tensors]\n", "issue": "[BUG][client] web session has a different signature with the local session\n**Describe the bug**\r\n\r\nWeb has it's own session, but it's `run` method has a different signature with the local session, which lead to a bug that when user submits more than one tensor, only the first tensor would be executed.\r\n\r\n**To Reproduce**\r\n\r\n```python\r\nIn [14]: from mars.session import new_session \r\n\r\nIn [15]: sess = new_session('http://0.0.0.0:49911') \r\n\r\nIn [16]: a = mt.ones((2, 2)) \r\n\r\nIn [17]: sess.run(a, a+1) \r\nOut[17]: \r\n[array([[1., 1.],\r\n [1., 1.]])]\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport time\nimport logging\n\nimport requests\n\nfrom ..compat import six, TimeoutError\nfrom ..serialize import dataserializer\nfrom ..errors import ExecutionInterrupted\n\nlogger = logging.getLogger(__name__)\n\n\nclass Session(object):\n def __init__(self, endpoint, args=None):\n self._endpoint = endpoint\n self._args = args\n self._tensor_to_graph = dict()\n\n self._req_session = requests.Session()\n\n from requests.adapters import HTTPAdapter\n self._req_session.mount('http://stackoverflow.com', HTTPAdapter(max_retries=5))\n self._main()\n\n @property\n def session_id(self):\n return self._session_id\n\n def _main(self):\n resp = self._req_session.post(self._endpoint + '/api/session', self._args)\n if resp.status_code >= 400:\n raise SystemError('Failed to create mars session.')\n content = json.loads(resp.text)\n self._session_id = content['session_id']\n\n def run(self, tensors, compose=True, wait=True, timeout=-1):\n from ..graph import DirectedGraph\n graph = DirectedGraph()\n\n if not isinstance(tensors, (list, tuple, set)):\n tensors = [tensors]\n for t in tensors:\n graph = t.build_graph(graph=graph, tiled=False, compose=compose)\n targets = 
[t.key for t in tensors]\n\n targets_join = ','.join(targets)\n session_url = self._endpoint + '/api/session/' + self._session_id\n graph_json = graph.to_json()\n\n resp_json = self._submit_graph(graph_json, targets_join)\n graph_key = resp_json['graph_key']\n graph_url = session_url + '/graph/' + graph_key\n\n for t in tensors:\n self._tensor_to_graph[t.key] = graph_key\n\n if wait:\n exec_start_time = time.time()\n while timeout <= 0 or time.time() - exec_start_time <= timeout:\n try:\n time.sleep(1)\n try:\n resp = self._req_session.get(graph_url)\n except requests.ConnectionError as ex:\n err_msg = str(ex)\n if 'ConnectionResetError' in err_msg or 'Connection refused' in err_msg:\n continue\n raise\n if resp.status_code == 504:\n logging.debug('Gateway Time-out, try again')\n continue\n if resp.status_code >= 400:\n raise SystemError('Failed to read task status. Code: %d, Reason: %s, Content:\\n%s' %\n (resp.status_code, resp.reason, resp.text))\n resp_json = json.loads(resp.text)\n if resp_json['state'] in ('running', 'preparing'):\n continue\n elif resp_json['state'] == 'success':\n break\n elif resp_json['state'] == 'cancelled':\n raise ExecutionInterrupted\n elif resp_json['state'] == 'failed':\n # TODO add traceback\n if 'traceback' in resp_json:\n traceback = resp_json['traceback']\n if isinstance(traceback, list):\n traceback = ''.join(str(s) for s in traceback)\n raise SystemError('Graph execution failed.\\nMessage: %s\\nTraceback from server:\\n%s' %\n (resp_json['msg'], traceback))\n else:\n raise SystemError('Graph execution failed with unknown reason.')\n else:\n raise SystemError('Unknown graph execution state %s' % resp_json['state'])\n except KeyboardInterrupt:\n resp = self._req_session.delete(graph_url)\n if resp.status_code >= 400:\n raise SystemError('Failed to stop graph execution. Code: %d, Reason: %s, Content:\\n%s' %\n (resp.status_code, resp.reason, resp.text))\n if 0 < timeout < time.time() - exec_start_time:\n raise TimeoutError\n data_list = []\n for tk in targets:\n resp = self._req_session.get(session_url + '/graph/' + graph_key + '/data/' + tk)\n if resp.status_code >= 400:\n continue\n data_list.append(dataserializer.loads(resp.content))\n return data_list\n else:\n return graph_key\n\n def decref(self, *keys):\n session_url = self._endpoint + '/api/session/' + self._session_id\n for k in keys:\n if k not in self._tensor_to_graph:\n continue\n data_url = session_url + '/graph/%s/data/%s' % (self._tensor_to_graph[k], k)\n self._req_session.delete(data_url)\n\n def stop(self, graph_key):\n session_url = self._endpoint + '/api/session/' + self._session_id\n graph_url = session_url + '/graph/' + graph_key\n resp = self._req_session.delete(graph_url)\n if resp.status_code >= 400:\n raise SystemError('Failed to stop graph execution. 
Code: %d, Reason: %s, Content:\\n%s' %\n (resp.status_code, resp.reason, resp.text))\n\n def _submit_graph(self, graph_json, targets):\n session_url = self._endpoint + '/api/session/' + self._session_id\n resp = self._req_session.post(session_url + '/graph', dict(\n graph=json.dumps(graph_json),\n target=targets,\n ))\n if resp.status_code >= 400:\n resp_json = json.loads(resp.text)\n exc_info = base64.b64decode(resp_json['exc_info'])\n six.reraise(*exc_info)\n resp_json = json.loads(resp.text)\n return resp_json\n\n def close(self):\n self.decref(*list(self._tensor_to_graph.keys()))\n\n resp = self._req_session.delete(self._endpoint + '/api/session/' + self._session_id)\n if resp.status_code >= 400:\n raise SystemError('Failed to close mars session.')\n\n def check_service_ready(self, timeout=1):\n try:\n resp = self._req_session.get(self._endpoint + '/api', timeout=timeout)\n except (requests.ConnectionError, requests.Timeout):\n return False\n if resp.status_code >= 400:\n return False\n return True\n\n def count_workers(self):\n resp = self._req_session.get(self._endpoint + '/api/worker', timeout=1)\n return json.loads(resp.text)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n", "path": "mars/web/session.py"}]}
| 2,722 | 290 |
gh_patches_debug_5368
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-337
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add consistency check for model data object in the CI tests
We should take a snapshot of a "correct" model data object (as returned by `get_model_data()`) for the CI testing dataset and always compare the json-parsed outputs of `get_model_data` with it.
Might be a bit tedious to alter every time we run `get_model_data`, but otherwise there are certain situations where we can get silent failures, since certain parts of the Model Analyzer have more general exception catching built in (certain data simply can't be produced in various situations) and `get_model_data` itself is made to work with incomplete data in order to return this data during training.
If we want to be really scrupulous we should check the model data object at each phase of training, but I think that'd be overdoing it.
</issue>
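The snapshot comparison described in the issue could be wired into CI roughly as follows; the helper name, snapshot path, and the way the model data object is obtained are illustrative only, not taken from the MindsDB code base:

```python
# Hypothetical CI check: round-trip the model data object through JSON and
# compare it against a stored "known good" snapshot for the CI dataset.
import json


def assert_model_data_consistent(model_data, snapshot_path):
    # JSON round-trip so the comparison sees exactly what an API consumer
    # parsing the serialized output would see.
    current = json.loads(json.dumps(model_data, default=str))
    with open(snapshot_path) as f:
        expected = json.load(f)
    assert current == expected, 'model data object drifted from the stored snapshot'


# Example call (names are illustrative):
# assert_model_data_consistent(predictor.get_model_data('ci_model'),
#                              'tests/snapshots/ci_model.json')
```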
<code>
[start of mindsdb/__about__.py]
1 __title__ = 'MindsDB'
2 __package_name__ = 'mindsdb'
3 __version__ = '1.7.11'
4 __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
5 __email__ = "[email protected]"
6 __author__ = 'MindsDB Inc'
7 __github__ = 'https://github.com/mindsdb/mindsdb'
8 __pypi__ = 'https://pypi.org/project/mindsdb'
9 __license__ = 'MIT'
10 __copyright__ = 'Copyright 2018- mindsdb'
11
[end of mindsdb/__about__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '1.7.11'
+__version__ = '1.7.12'
__description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
|
{"golden_diff": "diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py\n--- a/mindsdb/__about__.py\n+++ b/mindsdb/__about__.py\n@@ -1,6 +1,6 @@\n __title__ = 'MindsDB'\n __package_name__ = 'mindsdb'\n-__version__ = '1.7.11'\n+__version__ = '1.7.12'\n __description__ = \"MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n __email__ = \"[email protected]\"\n __author__ = 'MindsDB Inc'\n", "issue": "Add consistency check for model data object in the CI tests\nWe should take a snapshot of a \"correct\" model data object (as returned by `get_model_data()`) for the CI testing dataset and always compare the json-parsed outputs of `get_model_data` with it.\r\n\r\nMight be a bit tedious to alter every time when we run `get_model_data` ,but otherwise there are certain situation where we can get silent failures. Since certain parts of the Model Analyzer have more general exception catching built in (since certain data simply can't be produced in various situations) and `get_model_data` itself is made to work with incomplete data in order to return this data during training.\r\n\r\nIf we want to be really scrupulous we should check the model data object at each phase of training, but I think that'd be overdoing it.\n", "before_files": [{"content": "__title__ = 'MindsDB'\n__package_name__ = 'mindsdb'\n__version__ = '1.7.11'\n__description__ = \"MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/mindsdb'\n__pypi__ = 'https://pypi.org/project/mindsdb'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018- mindsdb'\n", "path": "mindsdb/__about__.py"}]}
| 858 | 147 |
gh_patches_debug_34409
|
rasdani/github-patches
|
git_diff
|
ultrabug__py3status-822
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[clock] It is more intuitive to round clock emoji to the nearest 15 or 45 minutes
The emoji clock is being used in many apps these days, and I noticed that the py3status implementation is not consistent with the rest and is less intuitive to me.
Currently the clock emoji is not being updated until the hour exactly reaches the next step, so for example:
- 14:59 🕝
Instead, it is more intuitive to choose the icon for the time rounded to the nearest 15 or 45 minutes, that is:
- 14:01 🕑
- 14:14 🕑
- 14:15 🕝
- 14:44 🕝
- 14:45 🕒
- 14:59 🕒
/cc @tobes as you created this module.
</issue>
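
The behaviour requested above boils down to adding half a block's width to the decimal hour before truncating to an icon index. Below is a minimal standalone sketch of that arithmetic (Python 3 division assumed; `CLOCK_BLOCKS` and `BLOCK_HOURS` mirror the module's `blocks` / `block_hours` configuration parameters shown further down). It is only an illustration, not the module's actual implementation; the real change appears in the patch at the end of this entry.

```python
# Standalone sketch: choose a clock-emoji icon, rounding to the nearest block
# instead of truncating down to the block that has already started.
CLOCK_BLOCKS = u'🕛🕧🕐🕜🕑🕝🕒🕞🕓🕟🕔🕠🕕🕡🕖🕢🕗🕣🕘🕤🕙🕥🕚🕦'
BLOCK_HOURS = 12


def clock_icon(hour, minute, round_to_nearest_block=True):
    h = hour + minute / 60.0          # decimal hour, e.g. 14:59 -> ~14.98
    if round_to_nearest_block:
        # shift by half a block (15 minutes here) so e.g. 14:46-15:15 share one icon
        h += (BLOCK_HOURS / len(CLOCK_BLOCKS)) / 2.0
    h = h % BLOCK_HOURS
    return CLOCK_BLOCKS[int(h / BLOCK_HOURS * len(CLOCK_BLOCKS))]


print(clock_icon(14, 59))                                # 🕒 (rounded)
print(clock_icon(14, 59, round_to_nearest_block=False))  # 🕝 (old behaviour)
```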
<code>
[start of py3status/modules/clock.py]
1 # -*- coding: utf-8 -*-
2 """
3 Display date and time.
4
5 This module allows one or more datetimes to be displayed.
6 All datetimes share the same format_time but can set their own timezones.
7 Timezones are defined in the `format` using the TZ name in squiggly brackets eg
8 `{GMT}`, `{Portugal}`, `{Europe/Paris}`, `{America/Argentina/Buenos_Aires}`.
9
10 ISO-3166 two letter country codes eg `{de}` can also be used but if more than
11 one timezone exists for the country eg `{us}` the first one will be selected.
12
13 `{Local}` can be used for the local settings of your computer.
14
15 Note: Timezones are case sensitive
16
17 A full list of timezones can be found at
18 https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
19
20 Configuration parameters:
21 block_hours: length of time period for all blocks in hours (default 12)
22 blocks: a string, where each character represents time period
23 from the start of a time period.
24 (default '🕛🕧🕐🕜🕑🕝🕒🕞🕓🕟🕔🕠🕕🕡🕖🕢🕗🕣🕘🕤🕙🕥🕚🕦')
25 button_change_format: button that switches format used setting to None
26 disables (default 1)
27 button_change_time_format: button that switches format_time used. Setting
28 to None disables (default 2)
29 button_reset: button that switches display to the first timezone. Setting
30 to None disables (default 3)
31 cycle: If more than one display then how many seconds between changing the
32 display (default 0)
33 format: defines the timezones displayed. This can be a single string or a
34 list. If a list is supplied then the formats can be cycled through
35 using `cycle` or by button click. (default '{Local}')
36 format_time: format to use for the time, strftime directives such as `%H`
37 can be used this can be either a string or to allow multiple formats as
38 a list. The one used can be changed by button click.
39 *(default ['[{name_unclear} ]%c', '[{name_unclear} ]%x %X',
40 '[{name_unclear} ]%a %H:%M', '[{name_unclear} ]{icon}'])*
41
42 Format placeholders:
43 {icon} a character representing the time from `blocks`
44 {name} friendly timezone name eg `Buenos Aires`
45 {name_unclear} friendly timezone name eg `Buenos Aires` but is empty if
46 only one timezone is provided
47 {timezone} full timezone name eg `America/Argentina/Buenos_Aires`
48 {timezone_unclear} full timezone name eg `America/Argentina/Buenos_Aires`
49 but is empty if only one timezone is provided
50
51
52 Requires:
53 pytz: python library
54 tzlocal: python library
55
56 i3status.conf example:
57
58 ```
59 # cycling through London, Warsaw, Tokyo
60 clock {
61 cycle = 30
62 format = ["{Europe/London}", "{Europe/Warsaw}", "{Asia/Tokyo}"]
63 format_time = "{name} %H:%M"
64 }
65
66
67 # Show the time and date in New York
68 clock {
69 format = "Big Apple {America/New_York}"
70 format_time = "%Y-%m-%d %H:%M:%S"
71 }
72
73
74 # wall clocks
75 clock {
76 format = "{Asia/Calcutta} {Africa/Nairobi} {Asia/Bangkok}"
77 format_time = "{name} {icon}"
78 }
79 ```
80
81 @author tobes
82 @license BSD
83
84 SAMPLE OUTPUT
85 {'full_text': 'Sun 15 Jan 2017 23:27:17 GMT'}
86
87 london
88 {'full_text': 'Thursday Feb 23 1:42 AM London'}
89 """
90
91 import re
92 import math
93 from datetime import datetime
94 from time import time
95
96 import pytz
97 import tzlocal
98
99 CLOCK_BLOCKS = u'🕛🕧🕐🕜🕑🕝🕒🕞🕓🕟🕔🕠🕕🕡🕖🕢🕗🕣🕘🕤🕙🕥🕚🕦'
100
101
102 class Py3status:
103 """
104 """
105 # available configuration parameters
106 block_hours = 12
107 blocks = CLOCK_BLOCKS
108 button_change_format = 1
109 button_change_time_format = 2
110 button_reset = 3
111 cycle = 0
112 format = "{Local}"
113 format_time = [
114 '[{name_unclear} ]%c',
115 '[{name_unclear} ]%x %X',
116 '[{name_unclear} ]%a %H:%M',
117 '[{name_unclear} ]{icon}',
118 ]
119
120 def post_config_hook(self):
121 # Multiple clocks are possible that can be cycled through
122 if not isinstance(self.format, list):
123 self.format = [self.format]
124 # if only one item we don't need to cycle
125 if len(self.format) == 1:
126 self.cycle = 0
127 # find any declared timezones eg {Europe/London}
128 self._items = {}
129 matches = re.findall('\{([^}]*)\}', ''.join(self.format))
130 for match in matches:
131 self._items[match] = self._get_timezone(match)
132
133 self.multiple_tz = len(self._items) > 1
134
135 if not isinstance(self.format_time, list):
136 self.format_time = [self.format_time]
137
138 # workout how often in seconds we will need to do an update to keep the
139 # display fresh
140 self.time_deltas = []
141 for format in self.format_time:
142 format_time = re.sub('\{([^}]*)\}', '', format)
143 format_time = format_time.replace('%%', '')
144 if '%f' in format_time:
145 # microseconds
146 time_delta = 0
147 elif '%S' in format_time:
148 # seconds
149 time_delta = 1
150 elif '%s' in format_time:
151 # seconds since unix epoch start
152 time_delta = 1
153 elif '%T' in format_time:
154 # seconds included in "%H:%M:%S"
155 time_delta = 1
156 elif '%c' in format_time:
157 # Locale’s appropriate date and time representation
158 time_delta = 1
159 elif '%X' in format_time:
160 # Locale’s appropriate time representation
161 time_delta = 1
162 else:
163 time_delta = 60
164 self.time_deltas.append(time_delta)
165
166 self.active_time_format = 0
167
168 self._cycle_time = time() + self.cycle
169 self.active = 0
170
171 def _get_timezone(self, tz):
172 """
173 Find and return the time zone if possible
174 """
175 # special Local timezone
176 if tz == 'Local':
177 try:
178 return tzlocal.get_localzone()
179 except pytz.UnknownTimeZoneError:
180 return '?'
181
182 # we can use a country code to get tz
183 # FIXME this is broken for multi-timezone countries eg US
184 # for now we just grab the first one
185 if len(tz) == 2:
186 try:
187 zones = pytz.country_timezones(tz)
188 except KeyError:
189 return '?'
190 tz = zones[0]
191
192 # get the timezone
193 try:
194 zone = pytz.timezone(tz)
195 except pytz.UnknownTimeZoneError:
196 return '?'
197 return zone
198
199 def _change_active(self, diff):
200 self.active = (self.active + diff) % len(self.format)
201
202 def on_click(self, i3s_output_list, i3s_config, event):
203 """
204 Switch the displayed module or pass the event on to the active module
205 """
206 # reset cycle time
207 if event['button'] == self.button_reset:
208 self.active = 0
209 # reset the cycle time
210 self._cycle_time = time() + self.cycle
211 elif event['button'] == self.button_change_time_format:
212 self.active_time_format += 1
213 if self.active_time_format >= len(self.format_time):
214 self.active_time_format = 0
215 elif event['button'] == self.button_change_format:
216 self._change_active(1)
217
218 def clock(self, i3s_output_list, i3s_config):
219
220 # cycling
221 if self.cycle and time() >= self._cycle_time:
222 self._change_active(1)
223 self._cycle_time = time() + self.cycle
224
225 # update our times
226 times = {}
227 for name, zone in self._items.items():
228 if zone == '?':
229 times[name] = '?'
230 else:
231 t = datetime.now(zone)
232 format_time = self.format_time[self.active_time_format]
233 icon = None
234 if self.py3.format_contains(format_time, 'icon'):
235 # calculate the decimal hour
236 h = t.hour + t.minute / 60.
237 # make 12 hourly etc
238 h = h % self.block_hours
239 idx = int(math.floor(h / self.block_hours * (len(
240 self.blocks))))
241 icon = self.blocks[idx]
242
243 timezone = zone.zone
244 tzname = timezone.split('/')[-1].replace('_', ' ')
245
246 if self.multiple_tz:
247 name_unclear = tzname
248 timezone_unclear = timezone
249 else:
250 name_unclear = ''
251 timezone_unclear = ''
252
253 format_time = self.py3.safe_format(
254 format_time,
255 dict(
256 icon=icon,
257 name=tzname,
258 name_unclear=name_unclear,
259 timezone=timezone,
260 timezone_unclear=timezone_unclear,
261 ))
262 if self.py3.is_python_2():
263 format_time = t.strftime(format_time.encode('utf-8'))
264 else:
265 format_time = t.strftime(format_time)
266 times[name] = format_time
267
268 # work out when we need to update
269 timeout = self.py3.time_in(
270 sync_to=self.time_deltas[self.active_time_format]
271 )
272
273 # if cycling we need to make sure we update when they are needed
274 if self.cycle:
275 cycle_timeout = self._cycle_time
276 timeout = min(timeout, cycle_timeout)
277
278 return {
279 'full_text': self.py3.safe_format(self.format[self.active], times),
280 'cached_until': timeout
281 }
282
283
284 if __name__ == "__main__":
285 """
286 Run module in test mode.
287 """
288 from py3status.module_test import module_test
289 module_test(Py3status)
290
[end of py3status/modules/clock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py3status/modules/clock.py b/py3status/modules/clock.py
--- a/py3status/modules/clock.py
+++ b/py3status/modules/clock.py
@@ -38,6 +38,10 @@
a list. The one used can be changed by button click.
*(default ['[{name_unclear} ]%c', '[{name_unclear} ]%x %X',
'[{name_unclear} ]%a %H:%M', '[{name_unclear} ]{icon}'])*
+ round_to_nearest_block: defines how a block icon is chosen. Examples:
+ when set to True, '13:14' is '🕐', '13:16' is '🕜' and '13:31' is '🕜';
+ when set to False, '13:14' is '🕐', '13:16' is '🕐' and '13:31' is '🕜'.
+ (default True)
Format placeholders:
{icon} a character representing the time from `blocks`
@@ -87,9 +91,9 @@
london
{'full_text': 'Thursday Feb 23 1:42 AM London'}
"""
+from __future__ import division
import re
-import math
from datetime import datetime
from time import time
@@ -116,6 +120,7 @@
'[{name_unclear} ]%a %H:%M',
'[{name_unclear} ]{icon}',
]
+ round_to_nearest_block = True
def post_config_hook(self):
# Multiple clocks are possible that can be cycled through
@@ -234,10 +239,11 @@
if self.py3.format_contains(format_time, 'icon'):
# calculate the decimal hour
h = t.hour + t.minute / 60.
+ if self.round_to_nearest_block:
+ h += (self.block_hours / len(self.blocks)) / 2
# make 12 hourly etc
h = h % self.block_hours
- idx = int(math.floor(h / self.block_hours * (len(
- self.blocks))))
+ idx = int(h / self.block_hours * len(self.blocks))
icon = self.blocks[idx]
timezone = zone.zone
|
{"golden_diff": "diff --git a/py3status/modules/clock.py b/py3status/modules/clock.py\n--- a/py3status/modules/clock.py\n+++ b/py3status/modules/clock.py\n@@ -38,6 +38,10 @@\n a list. The one used can be changed by button click.\n *(default ['[{name_unclear} ]%c', '[{name_unclear} ]%x %X',\n '[{name_unclear} ]%a %H:%M', '[{name_unclear} ]{icon}'])*\n+ round_to_nearest_block: defines how a block icon is chosen. Examples:\n+ when set to True, '13:14' is '\ud83d\udd50', '13:16' is '\ud83d\udd5c' and '13:31' is '\ud83d\udd5c';\n+ when set to False, '13:14' is '\ud83d\udd50', '13:16' is '\ud83d\udd50' and '13:31' is '\ud83d\udd5c'.\n+ (default True)\n \n Format placeholders:\n {icon} a character representing the time from `blocks`\n@@ -87,9 +91,9 @@\n london\n {'full_text': 'Thursday Feb 23 1:42 AM London'}\n \"\"\"\n+from __future__ import division\n \n import re\n-import math\n from datetime import datetime\n from time import time\n \n@@ -116,6 +120,7 @@\n '[{name_unclear} ]%a %H:%M',\n '[{name_unclear} ]{icon}',\n ]\n+ round_to_nearest_block = True\n \n def post_config_hook(self):\n # Multiple clocks are possible that can be cycled through\n@@ -234,10 +239,11 @@\n if self.py3.format_contains(format_time, 'icon'):\n # calculate the decimal hour\n h = t.hour + t.minute / 60.\n+ if self.round_to_nearest_block:\n+ h += (self.block_hours / len(self.blocks)) / 2\n # make 12 hourly etc\n h = h % self.block_hours\n- idx = int(math.floor(h / self.block_hours * (len(\n- self.blocks))))\n+ idx = int(h / self.block_hours * len(self.blocks))\n icon = self.blocks[idx]\n \n timezone = zone.zone\n", "issue": "[clock] It is more intuitive to round clock emoji to the nearest 15 or 45 minutes\nThe emoji clock is being used in many apps these days, and I noticed that py3status implementation is not consistent with the rest and is less intuitive to me.\r\n\r\nCurrently the clock emoji is not being updated until the hour exactly reaches the next step, so for example:\r\n- 14:59 \ud83d\udd5d\r\n\r\nInstead, it is more intuitive to choose icon for the time rounded to the nearest 15 or 45 minutes, that is:\r\n\r\n- 14:01 \ud83d\udd51\r\n- 14:14 \ud83d\udd51\r\n- 14:15 \ud83d\udd5d\r\n- 14:44 \ud83d\udd5d\r\n- 14:45 \ud83d\udd52\r\n- 14:59 \ud83d\udd52\r\n\r\n/cc @tobes as you created this module.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay date and time.\n\nThis module allows one or more datetimes to be displayed.\nAll datetimes share the same format_time but can set their own timezones.\nTimezones are defined in the `format` using the TZ name in squiggly brackets eg\n`{GMT}`, `{Portugal}`, `{Europe/Paris}`, `{America/Argentina/Buenos_Aires}`.\n\nISO-3166 two letter country codes eg `{de}` can also be used but if more than\none timezone exists for the country eg `{us}` the first one will be selected.\n\n`{Local}` can be used for the local settings of your computer.\n\nNote: Timezones are case sensitive\n\nA full list of timezones can be found at\nhttps://en.wikipedia.org/wiki/List_of_tz_database_time_zones\n\nConfiguration parameters:\n block_hours: length of time period for all blocks in hours (default 12)\n blocks: a string, where each character represents time period\n from the start of a time period.\n (default 
'\ud83d\udd5b\ud83d\udd67\ud83d\udd50\ud83d\udd5c\ud83d\udd51\ud83d\udd5d\ud83d\udd52\ud83d\udd5e\ud83d\udd53\ud83d\udd5f\ud83d\udd54\ud83d\udd60\ud83d\udd55\ud83d\udd61\ud83d\udd56\ud83d\udd62\ud83d\udd57\ud83d\udd63\ud83d\udd58\ud83d\udd64\ud83d\udd59\ud83d\udd65\ud83d\udd5a\ud83d\udd66')\n button_change_format: button that switches format used setting to None\n disables (default 1)\n button_change_time_format: button that switches format_time used. Setting\n to None disables (default 2)\n button_reset: button that switches display to the first timezone. Setting\n to None disables (default 3)\n cycle: If more than one display then how many seconds between changing the\n display (default 0)\n format: defines the timezones displayed. This can be a single string or a\n list. If a list is supplied then the formats can be cycled through\n using `cycle` or by button click. (default '{Local}')\n format_time: format to use for the time, strftime directives such as `%H`\n can be used this can be either a string or to allow multiple formats as\n a list. The one used can be changed by button click.\n *(default ['[{name_unclear} ]%c', '[{name_unclear} ]%x %X',\n '[{name_unclear} ]%a %H:%M', '[{name_unclear} ]{icon}'])*\n\nFormat placeholders:\n {icon} a character representing the time from `blocks`\n {name} friendly timezone name eg `Buenos Aires`\n {name_unclear} friendly timezone name eg `Buenos Aires` but is empty if\n only one timezone is provided\n {timezone} full timezone name eg `America/Argentina/Buenos_Aires`\n {timezone_unclear} full timezone name eg `America/Argentina/Buenos_Aires`\n but is empty if only one timezone is provided\n\n\nRequires:\n pytz: python library\n tzlocal: python library\n\ni3status.conf example:\n\n```\n# cycling through London, Warsaw, Tokyo\nclock {\n cycle = 30\n format = [\"{Europe/London}\", \"{Europe/Warsaw}\", \"{Asia/Tokyo}\"]\n format_time = \"{name} %H:%M\"\n}\n\n\n# Show the time and date in New York\nclock {\n format = \"Big Apple {America/New_York}\"\n format_time = \"%Y-%m-%d %H:%M:%S\"\n}\n\n\n# wall clocks\nclock {\n format = \"{Asia/Calcutta} {Africa/Nairobi} {Asia/Bangkok}\"\n format_time = \"{name} {icon}\"\n}\n```\n\n@author tobes\n@license BSD\n\nSAMPLE OUTPUT\n{'full_text': 'Sun 15 Jan 2017 23:27:17 GMT'}\n\nlondon\n{'full_text': 'Thursday Feb 23 1:42 AM London'}\n\"\"\"\n\nimport re\nimport math\nfrom datetime import datetime\nfrom time import time\n\nimport pytz\nimport tzlocal\n\nCLOCK_BLOCKS = u'\ud83d\udd5b\ud83d\udd67\ud83d\udd50\ud83d\udd5c\ud83d\udd51\ud83d\udd5d\ud83d\udd52\ud83d\udd5e\ud83d\udd53\ud83d\udd5f\ud83d\udd54\ud83d\udd60\ud83d\udd55\ud83d\udd61\ud83d\udd56\ud83d\udd62\ud83d\udd57\ud83d\udd63\ud83d\udd58\ud83d\udd64\ud83d\udd59\ud83d\udd65\ud83d\udd5a\ud83d\udd66'\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n block_hours = 12\n blocks = CLOCK_BLOCKS\n button_change_format = 1\n button_change_time_format = 2\n button_reset = 3\n cycle = 0\n format = \"{Local}\"\n format_time = [\n '[{name_unclear} ]%c',\n '[{name_unclear} ]%x %X',\n '[{name_unclear} ]%a %H:%M',\n '[{name_unclear} ]{icon}',\n ]\n\n def post_config_hook(self):\n # Multiple clocks are possible that can be cycled through\n if not isinstance(self.format, list):\n self.format = [self.format]\n # if only one item we don't need to cycle\n if len(self.format) == 1:\n self.cycle = 0\n # find any declared timezones eg {Europe/London}\n self._items = {}\n matches = re.findall('\\{([^}]*)\\}', ''.join(self.format))\n for match in 
matches:\n self._items[match] = self._get_timezone(match)\n\n self.multiple_tz = len(self._items) > 1\n\n if not isinstance(self.format_time, list):\n self.format_time = [self.format_time]\n\n # workout how often in seconds we will need to do an update to keep the\n # display fresh\n self.time_deltas = []\n for format in self.format_time:\n format_time = re.sub('\\{([^}]*)\\}', '', format)\n format_time = format_time.replace('%%', '')\n if '%f' in format_time:\n # microseconds\n time_delta = 0\n elif '%S' in format_time:\n # seconds\n time_delta = 1\n elif '%s' in format_time:\n # seconds since unix epoch start\n time_delta = 1\n elif '%T' in format_time:\n # seconds included in \"%H:%M:%S\"\n time_delta = 1\n elif '%c' in format_time:\n # Locale\u2019s appropriate date and time representation\n time_delta = 1\n elif '%X' in format_time:\n # Locale\u2019s appropriate time representation\n time_delta = 1\n else:\n time_delta = 60\n self.time_deltas.append(time_delta)\n\n self.active_time_format = 0\n\n self._cycle_time = time() + self.cycle\n self.active = 0\n\n def _get_timezone(self, tz):\n \"\"\"\n Find and return the time zone if possible\n \"\"\"\n # special Local timezone\n if tz == 'Local':\n try:\n return tzlocal.get_localzone()\n except pytz.UnknownTimeZoneError:\n return '?'\n\n # we can use a country code to get tz\n # FIXME this is broken for multi-timezone countries eg US\n # for now we just grab the first one\n if len(tz) == 2:\n try:\n zones = pytz.country_timezones(tz)\n except KeyError:\n return '?'\n tz = zones[0]\n\n # get the timezone\n try:\n zone = pytz.timezone(tz)\n except pytz.UnknownTimeZoneError:\n return '?'\n return zone\n\n def _change_active(self, diff):\n self.active = (self.active + diff) % len(self.format)\n\n def on_click(self, i3s_output_list, i3s_config, event):\n \"\"\"\n Switch the displayed module or pass the event on to the active module\n \"\"\"\n # reset cycle time\n if event['button'] == self.button_reset:\n self.active = 0\n # reset the cycle time\n self._cycle_time = time() + self.cycle\n elif event['button'] == self.button_change_time_format:\n self.active_time_format += 1\n if self.active_time_format >= len(self.format_time):\n self.active_time_format = 0\n elif event['button'] == self.button_change_format:\n self._change_active(1)\n\n def clock(self, i3s_output_list, i3s_config):\n\n # cycling\n if self.cycle and time() >= self._cycle_time:\n self._change_active(1)\n self._cycle_time = time() + self.cycle\n\n # update our times\n times = {}\n for name, zone in self._items.items():\n if zone == '?':\n times[name] = '?'\n else:\n t = datetime.now(zone)\n format_time = self.format_time[self.active_time_format]\n icon = None\n if self.py3.format_contains(format_time, 'icon'):\n # calculate the decimal hour\n h = t.hour + t.minute / 60.\n # make 12 hourly etc\n h = h % self.block_hours\n idx = int(math.floor(h / self.block_hours * (len(\n self.blocks))))\n icon = self.blocks[idx]\n\n timezone = zone.zone\n tzname = timezone.split('/')[-1].replace('_', ' ')\n\n if self.multiple_tz:\n name_unclear = tzname\n timezone_unclear = timezone\n else:\n name_unclear = ''\n timezone_unclear = ''\n\n format_time = self.py3.safe_format(\n format_time,\n dict(\n icon=icon,\n name=tzname,\n name_unclear=name_unclear,\n timezone=timezone,\n timezone_unclear=timezone_unclear,\n ))\n if self.py3.is_python_2():\n format_time = t.strftime(format_time.encode('utf-8'))\n else:\n format_time = t.strftime(format_time)\n times[name] = format_time\n\n # work out when we 
need to update\n timeout = self.py3.time_in(\n sync_to=self.time_deltas[self.active_time_format]\n )\n\n # if cycling we need to make sure we update when they are needed\n if self.cycle:\n cycle_timeout = self._cycle_time\n timeout = min(timeout, cycle_timeout)\n\n return {\n 'full_text': self.py3.safe_format(self.format[self.active], times),\n 'cached_until': timeout\n }\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/clock.py"}]}
| 3,773 | 518 |
gh_patches_debug_39257
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-336
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update eligibility verification form to use dynamic data
## Background
This builds on #208 and #319 to incorporate specific `EligibilityVerifier` properties into the [eligibility verification form](https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/forms.py#L15)
## Tasks
* [x] Create an `__init__()` method on the form class accepting a new kwarg `verifier` of type `EligibilityVerifier`
* [x] If the `verifier.form_sub_pattern` field has a value, add a `pattern` attribute with the value to the `sub` input via [Widget.attrs](https://docs.djangoproject.com/en/3.2/ref/forms/widgets/#django.forms.Widget.attrs)
* [x] If the `verifier.form_name_max_length` field has a value, add a `maxlength` attribute to the `name` input via Widgets.attrs
* [x] Use attributes from the `verifier` as `msgid` keys for labels etc.
* [x] Rework the [`eligibility:confirm` view](https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/views.py#L46) to initialize the `Page` viewmodel and eligibility verification form from the selected verifier
</issue>
<code>
[start of benefits/eligibility/views.py]
1 """
2 The eligibility application: view definitions for the eligibility verification flow.
3 """
4 from django.contrib import messages
5 from django.shortcuts import redirect
6 from django.urls import reverse
7 from django.utils.decorators import decorator_from_middleware
8 from django.utils.translation import pgettext, gettext as _
9
10 from benefits.core import middleware, recaptcha, session, viewmodels
11 from benefits.core.views import PageTemplateResponse, _index_image
12 from . import analytics, api, forms
13
14
15 @decorator_from_middleware(middleware.AgencySessionRequired)
16 @decorator_from_middleware(middleware.VerifierSessionRequired)
17 def index(request):
18 """View handler for the eligibility verification getting started screen."""
19
20 session.update(request, eligibility_types=[], origin=reverse("eligibility:index"))
21
22 page = viewmodels.Page(
23 title=_("eligibility.pages.index.title"),
24 content_title=_("eligibility.pages.index.content_title"),
25 media=[
26 viewmodels.MediaItem(
27 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")),
28 heading=_("eligibility.pages.index.items[0].title"),
29 details=_("eligibility.pages.index.items[0].text"),
30 ),
31 viewmodels.MediaItem(
32 icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")),
33 heading=_("eligibility.pages.index.items[1].title"),
34 details=_("eligibility.pages.index.items[1].text"),
35 ),
36 ],
37 paragraphs=[_("eligibility.pages.index.p[0]")],
38 image=_index_image(),
39 button=viewmodels.Button.primary(text=_("eligibility.pages.index.button"), url=reverse("eligibility:confirm")),
40 )
41
42 return PageTemplateResponse(request, page)
43
44
45 @decorator_from_middleware(middleware.AgencySessionRequired)
46 @decorator_from_middleware(middleware.RateLimit)
47 @decorator_from_middleware(middleware.VerifierSessionRequired)
48 def confirm(request):
49 """View handler for the eligibility verification form."""
50
51 page = viewmodels.Page(
52 title=_("eligibility.pages.confirm.title"),
53 content_title=_("eligibility.pages.confirm.content_title"),
54 paragraphs=[_("eligibility.pages.confirm.p[0]")],
55 form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=""),
56 classes="text-lg-center",
57 )
58
59 if request.method == "POST":
60 analytics.started_eligibility(request)
61
62 form = forms.EligibilityVerificationForm(request.POST)
63 response = _verify(request, form)
64
65 if response is None:
66 # form was not valid, allow for correction/resubmission
67 analytics.returned_error(request, form.errors)
68 page.forms = [form]
69 response = PageTemplateResponse(request, page)
70 elif session.eligible(request):
71 eligibility = session.eligibility(request)
72 response = verified(request, [eligibility.name])
73 else:
74 response = PageTemplateResponse(request, page)
75
76 return response
77
78
79 def _verify(request, form):
80 """Helper calls the eligibility verification API with user input."""
81
82 if not form.is_valid():
83 if recaptcha.has_error(form):
84 messages.error(request, "Recaptcha failed. Please try again.")
85 return None
86
87 sub, name = form.cleaned_data.get("sub"), form.cleaned_data.get("name")
88
89 agency = session.agency(request)
90 verifier = session.verifier(request)
91 client = api.Client(agency, verifier)
92
93 response = client.verify(sub, name)
94
95 if response.error and any(response.error):
96 form.add_api_errors(response.error)
97 return None
98 elif any(response.eligibility):
99 return verified(request, response.eligibility)
100 else:
101 return unverified(request)
102
103
104 @decorator_from_middleware(middleware.AgencySessionRequired)
105 def verified(request, verified_types):
106 """View handler for the verified eligibility page."""
107
108 analytics.returned_success(request)
109
110 enrollment_index = reverse("enrollment:index")
111 session.update(request, eligibility_types=verified_types, origin=enrollment_index)
112
113 return redirect(enrollment_index)
114
115
116 @decorator_from_middleware(middleware.AgencySessionRequired)
117 def unverified(request):
118 """View handler for the unverified eligibility page."""
119
120 analytics.returned_fail(request)
121
122 # tel: link to agency phone number
123 agency = session.agency(request)
124 buttons = viewmodels.Button.agency_contact_links(agency)
125
126 page = viewmodels.Page(
127 title=_("eligibility.pages.unverified.title"),
128 content_title=_("eligibility.pages.unverified.content_title"),
129 icon=viewmodels.Icon("idcardquestion", pgettext("image alt text", "core.icons.idcardquestion")),
130 paragraphs=[_("eligibility.pages.unverified.p[0]"), _("eligibility.pages.unverified.p[1]")],
131 buttons=buttons,
132 classes="text-lg-center",
133 )
134
135 return PageTemplateResponse(request, page)
136
[end of benefits/eligibility/views.py]
[start of benefits/eligibility/forms.py]
1 """
2 The eligibility application: Form definition for the eligibility verification flow.
3 """
4 import logging
5
6 from django import forms
7 from django.utils.translation import gettext_lazy as _
8
9 from benefits.core import recaptcha, widgets
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class EligibilityVerificationForm(forms.Form):
16 """Form to collect eligibility verification details."""
17
18 action_url = "eligibility:confirm"
19 method = "POST"
20
21 sub = forms.CharField(
22 label=_("eligibility.forms.confirm.fields.sub"), widget=widgets.FormControlTextInput(placeholder="A1234567")
23 )
24
25 name = forms.CharField(
26 label=_("eligibility.forms.confirm.fields.name"), widget=widgets.FormControlTextInput(placeholder="Rodriguez")
27 )
28
29 submit_value = _("eligibility.forms.confirm.submit")
30 submitting_value = _("eligibility.forms.confirm.submitting")
31
32 _error_messages = {
33 "invalid": _("eligibility.forms.confirm.errors.invalid"),
34 "missing": _("eligibility.forms.confirm.errors.missing"),
35 }
36
37 def add_api_errors(self, form_errors):
38 """Handle errors passed back from API server related to submitted form values."""
39
40 validation_errors = {
41 field: forms.ValidationError(self._error_messages.get(code, _("core.pages.error.title")), code=code)
42 for (field, code) in form_errors.items()
43 if field in self.fields
44 }
45
46 if len(validation_errors) > 0:
47 logger.warning("Form fields are invalid")
48
49 for (field, err) in validation_errors.items():
50 self.add_error(field, err)
51
52 def clean(self):
53 if not recaptcha.verify(self.data):
54 raise forms.ValidationError("reCAPTCHA failed")
55
[end of benefits/eligibility/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py
--- a/benefits/eligibility/forms.py
+++ b/benefits/eligibility/forms.py
@@ -6,7 +6,7 @@
from django import forms
from django.utils.translation import gettext_lazy as _
-from benefits.core import recaptcha, widgets
+from benefits.core import models, recaptcha, widgets
logger = logging.getLogger(__name__)
@@ -18,14 +18,6 @@
action_url = "eligibility:confirm"
method = "POST"
- sub = forms.CharField(
- label=_("eligibility.forms.confirm.fields.sub"), widget=widgets.FormControlTextInput(placeholder="A1234567")
- )
-
- name = forms.CharField(
- label=_("eligibility.forms.confirm.fields.name"), widget=widgets.FormControlTextInput(placeholder="Rodriguez")
- )
-
submit_value = _("eligibility.forms.confirm.submit")
submitting_value = _("eligibility.forms.confirm.submitting")
@@ -34,6 +26,21 @@
"missing": _("eligibility.forms.confirm.errors.missing"),
}
+ def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder)
+ if verifier.form_sub_pattern:
+ sub_widget.attrs.update({"pattern": verifier.form_sub_pattern})
+
+ self.fields["sub"] = forms.CharField(label=_(verifier.form_sub_label), widget=sub_widget)
+
+ name_widget = widgets.FormControlTextInput(placeholder=verifier.form_name_placeholder)
+ if verifier.form_name_max_length:
+ name_widget.attrs.update({"maxlength": verifier.form_name_max_length})
+
+ self.fields["name"] = forms.CharField(label=_(verifier.form_name_label), widget=name_widget)
+
def add_api_errors(self, form_errors):
"""Handle errors passed back from API server related to submitted form values."""
diff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py
--- a/benefits/eligibility/views.py
+++ b/benefits/eligibility/views.py
@@ -48,18 +48,20 @@
def confirm(request):
"""View handler for the eligibility verification form."""
+ verifier = session.verifier(request)
+
page = viewmodels.Page(
- title=_("eligibility.pages.confirm.title"),
- content_title=_("eligibility.pages.confirm.content_title"),
- paragraphs=[_("eligibility.pages.confirm.p[0]")],
- form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=""),
+ title=_(verifier.form_title),
+ content_title=_(verifier.form_content_title),
+ paragraphs=[_(verifier.form_blurb)],
+ form=forms.EligibilityVerificationForm(auto_id=True, label_suffix="", verifier=verifier),
classes="text-lg-center",
)
if request.method == "POST":
analytics.started_eligibility(request)
- form = forms.EligibilityVerificationForm(request.POST)
+ form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)
response = _verify(request, form)
if response is None:
|
{"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -6,7 +6,7 @@\n from django import forms\n from django.utils.translation import gettext_lazy as _\n \n-from benefits.core import recaptcha, widgets\n+from benefits.core import models, recaptcha, widgets\n \n \n logger = logging.getLogger(__name__)\n@@ -18,14 +18,6 @@\n action_url = \"eligibility:confirm\"\n method = \"POST\"\n \n- sub = forms.CharField(\n- label=_(\"eligibility.forms.confirm.fields.sub\"), widget=widgets.FormControlTextInput(placeholder=\"A1234567\")\n- )\n-\n- name = forms.CharField(\n- label=_(\"eligibility.forms.confirm.fields.name\"), widget=widgets.FormControlTextInput(placeholder=\"Rodriguez\")\n- )\n-\n submit_value = _(\"eligibility.forms.confirm.submit\")\n submitting_value = _(\"eligibility.forms.confirm.submitting\")\n \n@@ -34,6 +26,21 @@\n \"missing\": _(\"eligibility.forms.confirm.errors.missing\"),\n }\n \n+ def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+\n+ sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder)\n+ if verifier.form_sub_pattern:\n+ sub_widget.attrs.update({\"pattern\": verifier.form_sub_pattern})\n+\n+ self.fields[\"sub\"] = forms.CharField(label=_(verifier.form_sub_label), widget=sub_widget)\n+\n+ name_widget = widgets.FormControlTextInput(placeholder=verifier.form_name_placeholder)\n+ if verifier.form_name_max_length:\n+ name_widget.attrs.update({\"maxlength\": verifier.form_name_max_length})\n+\n+ self.fields[\"name\"] = forms.CharField(label=_(verifier.form_name_label), widget=name_widget)\n+\n def add_api_errors(self, form_errors):\n \"\"\"Handle errors passed back from API server related to submitted form values.\"\"\"\n \ndiff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py\n--- a/benefits/eligibility/views.py\n+++ b/benefits/eligibility/views.py\n@@ -48,18 +48,20 @@\n def confirm(request):\n \"\"\"View handler for the eligibility verification form.\"\"\"\n \n+ verifier = session.verifier(request)\n+\n page = viewmodels.Page(\n- title=_(\"eligibility.pages.confirm.title\"),\n- content_title=_(\"eligibility.pages.confirm.content_title\"),\n- paragraphs=[_(\"eligibility.pages.confirm.p[0]\")],\n- form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=\"\"),\n+ title=_(verifier.form_title),\n+ content_title=_(verifier.form_content_title),\n+ paragraphs=[_(verifier.form_blurb)],\n+ form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=\"\", verifier=verifier),\n classes=\"text-lg-center\",\n )\n \n if request.method == \"POST\":\n analytics.started_eligibility(request)\n \n- form = forms.EligibilityVerificationForm(request.POST)\n+ form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)\n response = _verify(request, form)\n \n if response is None:\n", "issue": "Update eligibility verification form to use dynamic data\n## Background\r\n\r\nThis builds on #208 and #319 to incorporate specific `EligibilityVerifier` properties into the [eligibility verification form](https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/forms.py#L15)\r\n\r\n## Tasks\r\n\r\n* [x] Create an `__init__()` method on the form class accepting a new kwarg `verifier` of type `EligibilityVerifier`\r\n* [x] If the `verifier.form_sub_pattern` field has a value, add a `pattern` attribute with the value to the `sub` input via 
[Widget.attrs](https://docs.djangoproject.com/en/3.2/ref/forms/widgets/#django.forms.Widget.attrs)\r\n* [x] If the `verifier.form_name_max_length` field has a value, add a `maxlength` attribute to the `name` input via Widgets.attrs\r\n* [x] Use attributes from the `verifier` as `msgid` keys for labels etc.\r\n* [x] Rework the [`eligibility:confirm` view](https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/views.py#L46) to initialize the `Page` viewmodel and eligibility verification form from the selected verifier\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: view definitions for the eligibility verification flow.\n\"\"\"\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import middleware, recaptcha, session, viewmodels\nfrom benefits.core.views import PageTemplateResponse, _index_image\nfrom . import analytics, api, forms\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef index(request):\n \"\"\"View handler for the eligibility verification getting started screen.\"\"\"\n\n session.update(request, eligibility_types=[], origin=reverse(\"eligibility:index\"))\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.index.title\"),\n content_title=_(\"eligibility.pages.index.content_title\"),\n media=[\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n heading=_(\"eligibility.pages.index.items[0].title\"),\n details=_(\"eligibility.pages.index.items[0].text\"),\n ),\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n heading=_(\"eligibility.pages.index.items[1].title\"),\n details=_(\"eligibility.pages.index.items[1].text\"),\n ),\n ],\n paragraphs=[_(\"eligibility.pages.index.p[0]\")],\n image=_index_image(),\n button=viewmodels.Button.primary(text=_(\"eligibility.pages.index.button\"), url=reverse(\"eligibility:confirm\")),\n )\n\n return PageTemplateResponse(request, page)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.RateLimit)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef confirm(request):\n \"\"\"View handler for the eligibility verification form.\"\"\"\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.confirm.title\"),\n content_title=_(\"eligibility.pages.confirm.content_title\"),\n paragraphs=[_(\"eligibility.pages.confirm.p[0]\")],\n form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=\"\"),\n classes=\"text-lg-center\",\n )\n\n if request.method == \"POST\":\n analytics.started_eligibility(request)\n\n form = forms.EligibilityVerificationForm(request.POST)\n response = _verify(request, form)\n\n if response is None:\n # form was not valid, allow for correction/resubmission\n analytics.returned_error(request, form.errors)\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n elif session.eligible(request):\n eligibility = session.eligibility(request)\n response = verified(request, [eligibility.name])\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\ndef _verify(request, form):\n \"\"\"Helper calls the eligibility verification API with user input.\"\"\"\n\n if 
not form.is_valid():\n if recaptcha.has_error(form):\n messages.error(request, \"Recaptcha failed. Please try again.\")\n return None\n\n sub, name = form.cleaned_data.get(\"sub\"), form.cleaned_data.get(\"name\")\n\n agency = session.agency(request)\n verifier = session.verifier(request)\n client = api.Client(agency, verifier)\n\n response = client.verify(sub, name)\n\n if response.error and any(response.error):\n form.add_api_errors(response.error)\n return None\n elif any(response.eligibility):\n return verified(request, response.eligibility)\n else:\n return unverified(request)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef verified(request, verified_types):\n \"\"\"View handler for the verified eligibility page.\"\"\"\n\n analytics.returned_success(request)\n\n enrollment_index = reverse(\"enrollment:index\")\n session.update(request, eligibility_types=verified_types, origin=enrollment_index)\n\n return redirect(enrollment_index)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef unverified(request):\n \"\"\"View handler for the unverified eligibility page.\"\"\"\n\n analytics.returned_fail(request)\n\n # tel: link to agency phone number\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.unverified.title\"),\n content_title=_(\"eligibility.pages.unverified.content_title\"),\n icon=viewmodels.Icon(\"idcardquestion\", pgettext(\"image alt text\", \"core.icons.idcardquestion\")),\n paragraphs=[_(\"eligibility.pages.unverified.p[0]\"), _(\"eligibility.pages.unverified.p[1]\")],\n buttons=buttons,\n classes=\"text-lg-center\",\n )\n\n return PageTemplateResponse(request, page)\n", "path": "benefits/eligibility/views.py"}, {"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import recaptcha, widgets\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n method = \"POST\"\n\n sub = forms.CharField(\n label=_(\"eligibility.forms.confirm.fields.sub\"), widget=widgets.FormControlTextInput(placeholder=\"A1234567\")\n )\n\n name = forms.CharField(\n label=_(\"eligibility.forms.confirm.fields.name\"), widget=widgets.FormControlTextInput(placeholder=\"Rodriguez\")\n )\n\n submit_value = _(\"eligibility.forms.confirm.submit\")\n submitting_value = _(\"eligibility.forms.confirm.submitting\")\n\n _error_messages = {\n \"invalid\": _(\"eligibility.forms.confirm.errors.invalid\"),\n \"missing\": _(\"eligibility.forms.confirm.errors.missing\"),\n }\n\n def add_api_errors(self, form_errors):\n \"\"\"Handle errors passed back from API server related to submitted form values.\"\"\"\n\n validation_errors = {\n field: forms.ValidationError(self._error_messages.get(code, _(\"core.pages.error.title\")), code=code)\n for (field, code) in form_errors.items()\n if field in self.fields\n }\n\n if len(validation_errors) > 0:\n logger.warning(\"Form fields are invalid\")\n\n for (field, err) in validation_errors.items():\n self.add_error(field, err)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n", "path": "benefits/eligibility/forms.py"}]}
| 2,602 | 720 |
gh_patches_debug_33001
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3456
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary and deranging override message when using alias packages
In a situation with 3 packages CA, CB and CC where CB requires CA (via an alias) and CC requires CB and CA also via aliases, when installing CC with a "conan install ..." conan reports a version override while actually the alias is pointing to exaclty the version of the package that conan says overrides the alias:
CB/0.0.2@user/testing requirement CA/ALIAS@user/testing overriden by CC/0.0.3@user/testing to CA/0.0.1@user/testing
To help us debug your issue please `explain:`
- [X] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [X] I've specified the Conan version, operating system version and any tool that can be relevant.
- [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
</issue>
<code>
[start of conans/client/graph/graph_builder.py]
1 import time
2
3 from conans.model.conan_file import get_env_context_manager
4 from conans.model.requires import Requirements
5 from conans.model.ref import ConanFileReference
6 from conans.errors import (ConanException, conanfile_exception_formatter,
7 ConanExceptionInUserConanfileMethod)
8 from conans.client.output import ScopedOutput
9 from conans.util.log import logger
10 from conans.client.graph.graph import DepsGraph, Node, RECIPE_WORKSPACE
11 from conans.model.workspace import WORKSPACE_FILE
12
13
14 class DepsGraphBuilder(object):
15 """ Responsible for computing the dependencies graph DepsGraph
16 """
17 def __init__(self, proxy, output, loader, resolver, workspace, recorder):
18 self._proxy = proxy
19 self._output = output
20 self._loader = loader
21 self._resolver = resolver
22 self._workspace = workspace
23 self._recorder = recorder
24
25 def load_graph(self, conanfile, check_updates, update, remote_name, processed_profile):
26 check_updates = check_updates or update
27 dep_graph = DepsGraph()
28 # compute the conanfile entry point for this dependency graph
29 root_node = Node(None, conanfile)
30 dep_graph.add_node(root_node)
31 public_deps = {} # {name: Node} dict with public nodes, so they are not added again
32 aliased = {}
33 # enter recursive computation
34 t1 = time.time()
35 loop_ancestors = []
36 self._load_deps(root_node, Requirements(), dep_graph, public_deps, None, None,
37 loop_ancestors, aliased, check_updates, update, remote_name,
38 processed_profile)
39 logger.debug("Deps-builder: Time to load deps %s" % (time.time() - t1))
40 t1 = time.time()
41 dep_graph.compute_package_ids()
42 logger.debug("Deps-builder: Propagate info %s" % (time.time() - t1))
43 return dep_graph
44
45 def _resolve_deps(self, node, aliased, update, remote_name):
46 # Resolve possible version ranges of the current node requirements
47 # new_reqs is a shallow copy of what is propagated upstream, so changes done by the
48 # RangeResolver are also done in new_reqs, and then propagated!
49 conanfile, conanref = node.conanfile, node.conan_ref
50 for _, require in conanfile.requires.items():
51 self._resolver.resolve(require, conanref, update, remote_name)
52
53 # After resolving ranges,
54 for req in conanfile.requires.values():
55 alias = aliased.get(req.conan_reference)
56 if alias:
57 req.conan_reference = alias
58
59 if not hasattr(conanfile, "_conan_evaluated_requires"):
60 conanfile._conan_evaluated_requires = conanfile.requires.copy()
61 elif conanfile.requires != conanfile._conan_evaluated_requires:
62 raise ConanException("%s: Incompatible requirements obtained in different "
63 "evaluations of 'requirements'\n"
64 " Previous requirements: %s\n"
65 " New requirements: %s"
66 % (conanref, list(conanfile._conan_evaluated_requires.values()),
67 list(conanfile.requires.values())))
68
69 def _load_deps(self, node, down_reqs, dep_graph, public_deps, down_ref, down_options,
70 loop_ancestors, aliased, check_updates, update, remote_name, processed_profile):
71 """ loads a Conan object from the given file
72 param node: Node object to be expanded in this step
73 down_reqs: the Requirements as coming from downstream, which can overwrite current
74 values
75 param deps: DepsGraph result
76 param public_deps: {name: Node} of already expanded public Nodes, not to be repeated
77 in graph
78 param down_ref: ConanFileReference of who is depending on current node for this expansion
79 """
80 # basic node configuration
81 new_reqs, new_options = self._config_node(node, down_reqs, down_ref, down_options)
82
83 self._resolve_deps(node, aliased, update, remote_name)
84
85 # Expand each one of the current requirements
86 for name, require in node.conanfile.requires.items():
87 if require.override:
88 continue
89 if require.conan_reference in loop_ancestors:
90 raise ConanException("Loop detected: %s"
91 % "->".join(str(r) for r in loop_ancestors))
92 new_loop_ancestors = loop_ancestors[:] # Copy for propagating
93 new_loop_ancestors.append(require.conan_reference)
94 previous = public_deps.get(name)
95 if require.private or not previous: # new node, must be added and expanded
96 new_node = self._create_new_node(node, dep_graph, require, public_deps, name,
97 aliased, check_updates, update, remote_name,
98 processed_profile)
99 # RECURSION!
100 # Make sure the subgraph is truly private
101 new_public_deps = {} if require.private else public_deps
102 self._load_deps(new_node, new_reqs, dep_graph, new_public_deps, node.conan_ref,
103 new_options, new_loop_ancestors, aliased, check_updates, update,
104 remote_name, processed_profile)
105 else: # a public node already exist with this name
106 previous_node, closure = previous
107 alias_ref = aliased.get(require.conan_reference, require.conan_reference)
108 # Necessary to make sure that it is pointing to the correct aliased
109 require.conan_reference = alias_ref
110 if previous_node.conan_ref != alias_ref:
111 raise ConanException("Conflict in %s\n"
112 " Requirement %s conflicts with already defined %s\n"
113 " Keeping %s\n"
114 " To change it, override it in your base requirements"
115 % (node.conan_ref, require.conan_reference,
116 previous_node.conan_ref, previous_node.conan_ref))
117 dep_graph.add_edge(node, previous_node)
118 # RECURSION!
119 if closure is None:
120 closure = dep_graph.closure(node)
121 public_deps[name] = previous_node, closure
122 if self._recurse(closure, new_reqs, new_options):
123 self._load_deps(previous_node, new_reqs, dep_graph, public_deps, node.conan_ref,
124 new_options, new_loop_ancestors, aliased, check_updates, update,
125 remote_name, processed_profile)
126
127 def _recurse(self, closure, new_reqs, new_options):
128 """ For a given closure, if some requirements or options coming from downstream
129 is incompatible with the current closure, then it is necessary to recurse
130 then, incompatibilities will be raised as usually"""
131 for req in new_reqs.values():
132 n = closure.get(req.conan_reference.name)
133 if n and n.conan_ref != req.conan_reference:
134 return True
135 for pkg_name, options_values in new_options.items():
136 n = closure.get(pkg_name)
137 if n:
138 options = n.conanfile.options
139 for option, value in options_values.items():
140 if getattr(options, option) != value:
141 return True
142 return False
143
144 def _config_node(self, node, down_reqs, down_ref, down_options):
145 """ update settings and option in the current ConanFile, computing actual
146 requirement values, cause they can be overridden by downstream requires
147 param settings: dict of settings values => {"os": "windows"}
148 """
149 try:
150 conanfile, conanref = node.conanfile, node.conan_ref
151 # Avoid extra time manipulating the sys.path for python
152 with get_env_context_manager(conanfile, without_python=True):
153 if hasattr(conanfile, "config"):
154 if not conanref:
155 output = ScopedOutput(str("PROJECT"), self._output)
156 output.warn("config() has been deprecated."
157 " Use config_options and configure")
158 with conanfile_exception_formatter(str(conanfile), "config"):
159 conanfile.config()
160 with conanfile_exception_formatter(str(conanfile), "config_options"):
161 conanfile.config_options()
162 conanfile.options.propagate_upstream(down_options, down_ref, conanref)
163 if hasattr(conanfile, "config"):
164 with conanfile_exception_formatter(str(conanfile), "config"):
165 conanfile.config()
166
167 with conanfile_exception_formatter(str(conanfile), "configure"):
168 conanfile.configure()
169
170 conanfile.settings.validate() # All has to be ok!
171 conanfile.options.validate()
172
173 # Update requirements (overwrites), computing new upstream
174 if hasattr(conanfile, "requirements"):
175 # If re-evaluating the recipe, in a diamond graph, with different options,
176 # it could happen that one execution path of requirements() defines a package
177 # and another one a different package raising Duplicate dependency error
178 # Or the two consecutive calls, adding 2 different dependencies for the two paths
179 # So it is necessary to save the "requires" state and restore it before a second
180 # execution of requirements(). It is a shallow copy, if first iteration is
181 # RequireResolve'd or overridden, the inner requirements are modified
182 if not hasattr(conanfile, "_conan_original_requires"):
183 conanfile._conan_original_requires = conanfile.requires.copy()
184 else:
185 conanfile.requires = conanfile._conan_original_requires.copy()
186
187 with conanfile_exception_formatter(str(conanfile), "requirements"):
188 conanfile.requirements()
189
190 new_options = conanfile.options.deps_package_values
191 new_down_reqs = conanfile.requires.update(down_reqs, self._output, conanref,
192 down_ref)
193 except ConanExceptionInUserConanfileMethod:
194 raise
195 except ConanException as e:
196 raise ConanException("%s: %s" % (conanref or "Conanfile", str(e)))
197 except Exception as e:
198 raise ConanException(e)
199
200 return new_down_reqs, new_options
201
202 def _create_new_node(self, current_node, dep_graph, requirement, public_deps, name_req, aliased,
203 check_updates, update, remote_name, processed_profile, alias_ref=None):
204 """ creates and adds a new node to the dependency graph
205 """
206 workspace_package = self._workspace[requirement.conan_reference] if self._workspace else None
207 if workspace_package:
208 conanfile_path = workspace_package.conanfile_path
209 recipe_status = RECIPE_WORKSPACE
210 remote = WORKSPACE_FILE
211 else:
212 try:
213 result = self._proxy.get_recipe(requirement.conan_reference,
214 check_updates, update, remote_name, self._recorder)
215 except ConanException as e:
216 base_ref = str(current_node.conan_ref or "PROJECT")
217 self._output.error("Failed requirement '%s' from '%s'"
218 % (requirement.conan_reference, base_ref))
219 raise e
220 conanfile_path, recipe_status, remote, _ = result
221
222 output = ScopedOutput(str(requirement.conan_reference), self._output)
223 dep_conanfile = self._loader.load_conanfile(conanfile_path, output, processed_profile,
224 reference=requirement.conan_reference)
225
226 if workspace_package:
227 workspace_package.conanfile = dep_conanfile
228 if getattr(dep_conanfile, "alias", None):
229 alias_reference = alias_ref or requirement.conan_reference
230 requirement.conan_reference = ConanFileReference.loads(dep_conanfile.alias)
231 aliased[alias_reference] = requirement.conan_reference
232 return self._create_new_node(current_node, dep_graph, requirement, public_deps,
233 name_req, aliased, check_updates, update,
234 remote_name, processed_profile, alias_ref=alias_reference)
235
236 new_node = Node(requirement.conan_reference, dep_conanfile)
237 new_node.recipe = recipe_status
238 new_node.remote = remote
239 dep_graph.add_node(new_node)
240 dep_graph.add_edge(current_node, new_node, requirement.private)
241 if not requirement.private:
242 public_deps[name_req] = new_node, None
243 return new_node
244
[end of conans/client/graph/graph_builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conans/client/graph/graph_builder.py b/conans/client/graph/graph_builder.py
--- a/conans/client/graph/graph_builder.py
+++ b/conans/client/graph/graph_builder.py
@@ -78,7 +78,7 @@
param down_ref: ConanFileReference of who is depending on current node for this expansion
"""
# basic node configuration
- new_reqs, new_options = self._config_node(node, down_reqs, down_ref, down_options)
+ new_reqs, new_options = self._config_node(node, down_reqs, down_ref, down_options, aliased)
self._resolve_deps(node, aliased, update, remote_name)
@@ -141,7 +141,7 @@
return True
return False
- def _config_node(self, node, down_reqs, down_ref, down_options):
+ def _config_node(self, node, down_reqs, down_ref, down_options, aliased):
""" update settings and option in the current ConanFile, computing actual
requirement values, cause they can be overridden by downstream requires
param settings: dict of settings values => {"os": "windows"}
@@ -188,8 +188,12 @@
conanfile.requirements()
new_options = conanfile.options.deps_package_values
- new_down_reqs = conanfile.requires.update(down_reqs, self._output, conanref,
- down_ref)
+ if aliased:
+ for req in conanfile.requires.values():
+ req.conan_reference = aliased.get(req.conan_reference,
+ req.conan_reference)
+ new_down_reqs = conanfile.requires.update(down_reqs, self._output,
+ conanref, down_ref)
except ConanExceptionInUserConanfileMethod:
raise
except ConanException as e:
|
{"golden_diff": "diff --git a/conans/client/graph/graph_builder.py b/conans/client/graph/graph_builder.py\n--- a/conans/client/graph/graph_builder.py\n+++ b/conans/client/graph/graph_builder.py\n@@ -78,7 +78,7 @@\n param down_ref: ConanFileReference of who is depending on current node for this expansion\n \"\"\"\n # basic node configuration\n- new_reqs, new_options = self._config_node(node, down_reqs, down_ref, down_options)\n+ new_reqs, new_options = self._config_node(node, down_reqs, down_ref, down_options, aliased)\n \n self._resolve_deps(node, aliased, update, remote_name)\n \n@@ -141,7 +141,7 @@\n return True\n return False\n \n- def _config_node(self, node, down_reqs, down_ref, down_options):\n+ def _config_node(self, node, down_reqs, down_ref, down_options, aliased):\n \"\"\" update settings and option in the current ConanFile, computing actual\n requirement values, cause they can be overridden by downstream requires\n param settings: dict of settings values => {\"os\": \"windows\"}\n@@ -188,8 +188,12 @@\n conanfile.requirements()\n \n new_options = conanfile.options.deps_package_values\n- new_down_reqs = conanfile.requires.update(down_reqs, self._output, conanref,\n- down_ref)\n+ if aliased:\n+ for req in conanfile.requires.values():\n+ req.conan_reference = aliased.get(req.conan_reference,\n+ req.conan_reference)\n+ new_down_reqs = conanfile.requires.update(down_reqs, self._output,\n+ conanref, down_ref)\n except ConanExceptionInUserConanfileMethod:\n raise\n except ConanException as e:\n", "issue": "Unnecessary and deranging override message when using alias packages\nIn a situation with 3 packages CA, CB and CC where CB requires CA (via an alias) and CC requires CB and CA also via aliases, when installing CC with a \"conan install ...\" conan reports a version override while actually the alias is pointing to exaclty the version of the package that conan says overrides the alias:\r\n\r\nCB/0.0.2@user/testing requirement CA/ALIAS@user/testing overriden by CC/0.0.3@user/testing to CA/0.0.1@user/testing\r\n\r\n\r\nTo help us debug your issue please `explain:`\r\n\r\n- [X] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [X] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import time\n\nfrom conans.model.conan_file import get_env_context_manager\nfrom conans.model.requires import Requirements\nfrom conans.model.ref import ConanFileReference\nfrom conans.errors import (ConanException, conanfile_exception_formatter,\n ConanExceptionInUserConanfileMethod)\nfrom conans.client.output import ScopedOutput\nfrom conans.util.log import logger\nfrom conans.client.graph.graph import DepsGraph, Node, RECIPE_WORKSPACE\nfrom conans.model.workspace import WORKSPACE_FILE\n\n\nclass DepsGraphBuilder(object):\n \"\"\" Responsible for computing the dependencies graph DepsGraph\n \"\"\"\n def __init__(self, proxy, output, loader, resolver, workspace, recorder):\n self._proxy = proxy\n self._output = output\n self._loader = loader\n self._resolver = resolver\n self._workspace = workspace\n self._recorder = recorder\n\n def load_graph(self, conanfile, check_updates, update, remote_name, processed_profile):\n check_updates = check_updates or update\n dep_graph = DepsGraph()\n # compute the conanfile entry point for this dependency 
graph\n root_node = Node(None, conanfile)\n dep_graph.add_node(root_node)\n public_deps = {} # {name: Node} dict with public nodes, so they are not added again\n aliased = {}\n # enter recursive computation\n t1 = time.time()\n loop_ancestors = []\n self._load_deps(root_node, Requirements(), dep_graph, public_deps, None, None,\n loop_ancestors, aliased, check_updates, update, remote_name,\n processed_profile)\n logger.debug(\"Deps-builder: Time to load deps %s\" % (time.time() - t1))\n t1 = time.time()\n dep_graph.compute_package_ids()\n logger.debug(\"Deps-builder: Propagate info %s\" % (time.time() - t1))\n return dep_graph\n\n def _resolve_deps(self, node, aliased, update, remote_name):\n # Resolve possible version ranges of the current node requirements\n # new_reqs is a shallow copy of what is propagated upstream, so changes done by the\n # RangeResolver are also done in new_reqs, and then propagated!\n conanfile, conanref = node.conanfile, node.conan_ref\n for _, require in conanfile.requires.items():\n self._resolver.resolve(require, conanref, update, remote_name)\n\n # After resolving ranges,\n for req in conanfile.requires.values():\n alias = aliased.get(req.conan_reference)\n if alias:\n req.conan_reference = alias\n\n if not hasattr(conanfile, \"_conan_evaluated_requires\"):\n conanfile._conan_evaluated_requires = conanfile.requires.copy()\n elif conanfile.requires != conanfile._conan_evaluated_requires:\n raise ConanException(\"%s: Incompatible requirements obtained in different \"\n \"evaluations of 'requirements'\\n\"\n \" Previous requirements: %s\\n\"\n \" New requirements: %s\"\n % (conanref, list(conanfile._conan_evaluated_requires.values()),\n list(conanfile.requires.values())))\n\n def _load_deps(self, node, down_reqs, dep_graph, public_deps, down_ref, down_options,\n loop_ancestors, aliased, check_updates, update, remote_name, processed_profile):\n \"\"\" loads a Conan object from the given file\n param node: Node object to be expanded in this step\n down_reqs: the Requirements as coming from downstream, which can overwrite current\n values\n param deps: DepsGraph result\n param public_deps: {name: Node} of already expanded public Nodes, not to be repeated\n in graph\n param down_ref: ConanFileReference of who is depending on current node for this expansion\n \"\"\"\n # basic node configuration\n new_reqs, new_options = self._config_node(node, down_reqs, down_ref, down_options)\n\n self._resolve_deps(node, aliased, update, remote_name)\n\n # Expand each one of the current requirements\n for name, require in node.conanfile.requires.items():\n if require.override:\n continue\n if require.conan_reference in loop_ancestors:\n raise ConanException(\"Loop detected: %s\"\n % \"->\".join(str(r) for r in loop_ancestors))\n new_loop_ancestors = loop_ancestors[:] # Copy for propagating\n new_loop_ancestors.append(require.conan_reference)\n previous = public_deps.get(name)\n if require.private or not previous: # new node, must be added and expanded\n new_node = self._create_new_node(node, dep_graph, require, public_deps, name,\n aliased, check_updates, update, remote_name,\n processed_profile)\n # RECURSION!\n # Make sure the subgraph is truly private\n new_public_deps = {} if require.private else public_deps\n self._load_deps(new_node, new_reqs, dep_graph, new_public_deps, node.conan_ref,\n new_options, new_loop_ancestors, aliased, check_updates, update,\n remote_name, processed_profile)\n else: # a public node already exist with this name\n previous_node, closure = previous\n 
alias_ref = aliased.get(require.conan_reference, require.conan_reference)\n # Necessary to make sure that it is pointing to the correct aliased\n require.conan_reference = alias_ref\n if previous_node.conan_ref != alias_ref:\n raise ConanException(\"Conflict in %s\\n\"\n \" Requirement %s conflicts with already defined %s\\n\"\n \" Keeping %s\\n\"\n \" To change it, override it in your base requirements\"\n % (node.conan_ref, require.conan_reference,\n previous_node.conan_ref, previous_node.conan_ref))\n dep_graph.add_edge(node, previous_node)\n # RECURSION!\n if closure is None:\n closure = dep_graph.closure(node)\n public_deps[name] = previous_node, closure\n if self._recurse(closure, new_reqs, new_options):\n self._load_deps(previous_node, new_reqs, dep_graph, public_deps, node.conan_ref,\n new_options, new_loop_ancestors, aliased, check_updates, update,\n remote_name, processed_profile)\n\n def _recurse(self, closure, new_reqs, new_options):\n \"\"\" For a given closure, if some requirements or options coming from downstream\n is incompatible with the current closure, then it is necessary to recurse\n then, incompatibilities will be raised as usually\"\"\"\n for req in new_reqs.values():\n n = closure.get(req.conan_reference.name)\n if n and n.conan_ref != req.conan_reference:\n return True\n for pkg_name, options_values in new_options.items():\n n = closure.get(pkg_name)\n if n:\n options = n.conanfile.options\n for option, value in options_values.items():\n if getattr(options, option) != value:\n return True\n return False\n\n def _config_node(self, node, down_reqs, down_ref, down_options):\n \"\"\" update settings and option in the current ConanFile, computing actual\n requirement values, cause they can be overridden by downstream requires\n param settings: dict of settings values => {\"os\": \"windows\"}\n \"\"\"\n try:\n conanfile, conanref = node.conanfile, node.conan_ref\n # Avoid extra time manipulating the sys.path for python\n with get_env_context_manager(conanfile, without_python=True):\n if hasattr(conanfile, \"config\"):\n if not conanref:\n output = ScopedOutput(str(\"PROJECT\"), self._output)\n output.warn(\"config() has been deprecated.\"\n \" Use config_options and configure\")\n with conanfile_exception_formatter(str(conanfile), \"config\"):\n conanfile.config()\n with conanfile_exception_formatter(str(conanfile), \"config_options\"):\n conanfile.config_options()\n conanfile.options.propagate_upstream(down_options, down_ref, conanref)\n if hasattr(conanfile, \"config\"):\n with conanfile_exception_formatter(str(conanfile), \"config\"):\n conanfile.config()\n\n with conanfile_exception_formatter(str(conanfile), \"configure\"):\n conanfile.configure()\n\n conanfile.settings.validate() # All has to be ok!\n conanfile.options.validate()\n\n # Update requirements (overwrites), computing new upstream\n if hasattr(conanfile, \"requirements\"):\n # If re-evaluating the recipe, in a diamond graph, with different options,\n # it could happen that one execution path of requirements() defines a package\n # and another one a different package raising Duplicate dependency error\n # Or the two consecutive calls, adding 2 different dependencies for the two paths\n # So it is necessary to save the \"requires\" state and restore it before a second\n # execution of requirements(). 
It is a shallow copy, if first iteration is\n # RequireResolve'd or overridden, the inner requirements are modified\n if not hasattr(conanfile, \"_conan_original_requires\"):\n conanfile._conan_original_requires = conanfile.requires.copy()\n else:\n conanfile.requires = conanfile._conan_original_requires.copy()\n\n with conanfile_exception_formatter(str(conanfile), \"requirements\"):\n conanfile.requirements()\n\n new_options = conanfile.options.deps_package_values\n new_down_reqs = conanfile.requires.update(down_reqs, self._output, conanref,\n down_ref)\n except ConanExceptionInUserConanfileMethod:\n raise\n except ConanException as e:\n raise ConanException(\"%s: %s\" % (conanref or \"Conanfile\", str(e)))\n except Exception as e:\n raise ConanException(e)\n\n return new_down_reqs, new_options\n\n def _create_new_node(self, current_node, dep_graph, requirement, public_deps, name_req, aliased,\n check_updates, update, remote_name, processed_profile, alias_ref=None):\n \"\"\" creates and adds a new node to the dependency graph\n \"\"\"\n workspace_package = self._workspace[requirement.conan_reference] if self._workspace else None\n if workspace_package:\n conanfile_path = workspace_package.conanfile_path\n recipe_status = RECIPE_WORKSPACE\n remote = WORKSPACE_FILE\n else:\n try:\n result = self._proxy.get_recipe(requirement.conan_reference,\n check_updates, update, remote_name, self._recorder)\n except ConanException as e:\n base_ref = str(current_node.conan_ref or \"PROJECT\")\n self._output.error(\"Failed requirement '%s' from '%s'\"\n % (requirement.conan_reference, base_ref))\n raise e\n conanfile_path, recipe_status, remote, _ = result\n\n output = ScopedOutput(str(requirement.conan_reference), self._output)\n dep_conanfile = self._loader.load_conanfile(conanfile_path, output, processed_profile,\n reference=requirement.conan_reference)\n\n if workspace_package:\n workspace_package.conanfile = dep_conanfile\n if getattr(dep_conanfile, \"alias\", None):\n alias_reference = alias_ref or requirement.conan_reference\n requirement.conan_reference = ConanFileReference.loads(dep_conanfile.alias)\n aliased[alias_reference] = requirement.conan_reference\n return self._create_new_node(current_node, dep_graph, requirement, public_deps,\n name_req, aliased, check_updates, update,\n remote_name, processed_profile, alias_ref=alias_reference)\n\n new_node = Node(requirement.conan_reference, dep_conanfile)\n new_node.recipe = recipe_status\n new_node.remote = remote\n dep_graph.add_node(new_node)\n dep_graph.add_edge(current_node, new_node, requirement.private)\n if not requirement.private:\n public_deps[name_req] = new_node, None\n return new_node\n", "path": "conans/client/graph/graph_builder.py"}]}
| 4,011 | 411 |
gh_patches_debug_41862
|
rasdani/github-patches
|
git_diff
|
tiangolo__fastapi-1540
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tutorial websocket doc example
**Describe the bug**
Hi,
In the websockets docs, the last example doesn't work.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a file main.py containing the last example from the bottom of the docs page
>https://fastapi.tiangolo.com/tutorial/websockets/#create-a-websocket
```python
from fastapi import Cookie, Depends, FastAPI, Header
from starlette.responses import HTMLResponse
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.websockets import WebSocket
app = FastAPI()
html = """
<!DOCTYPE html>
<html>
<head>
<title>Chat</title>
</head>
<body>
<h1>WebSocket Chat</h1>
<form action="" onsubmit="sendMessage(event)">
<label>Item ID: <input type="text" id="itemId" autocomplete="off" value="foo"/></label>
<button onclick="connect(event)">Connect</button>
<br>
<label>Message: <input type="text" id="messageText" autocomplete="off"/></label>
<button>Send</button>
</form>
<ul id='messages'>
</ul>
<script>
var ws = null;
function connect(event) {
var input = document.getElementById("itemId")
ws = new WebSocket("ws://localhost:8000/items/" + input.value + "/ws");
ws.onmessage = function(event) {
var messages = document.getElementById('messages')
var message = document.createElement('li')
var content = document.createTextNode(event.data)
message.appendChild(content)
messages.appendChild(message)
};
}
function sendMessage(event) {
var input = document.getElementById("messageText")
ws.send(input.value)
input.value = ''
event.preventDefault()
}
</script>
</body>
</html>
"""
@app.get("/")
async def get():
return HTMLResponse(html)
async def get_cookie_or_client(
websocket: WebSocket, session: str = Cookie(None), x_client: str = Header(None)
):
if session is None and x_client is None:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
return session or x_client
@app.websocket("/items/{item_id}/ws")
async def websocket_endpoint(
websocket: WebSocket,
item_id: int,
q: str = None,
cookie_or_client: str = Depends(get_cookie_or_client),
):
await websocket.accept()
while True:
data = await websocket.receive_text()
await websocket.send_text(
f"Session Cookie or X-Client Header value is: {cookie_or_client}"
)
if q is not None:
await websocket.send_text(f"Query parameter q is: {q}")
await websocket.send_text(f"Message text was: {data}, for item ID: {item_id}")
```
2. Run the application with the cmd:
```
uvicorn main:app --log-level debug --reload
```
3. Open the browser at 127.0.0.1
 - the first time, I connect with ItemID foo by pressing the Connect button
 - then I send the message "hi" with ItemID foo by pressing the Send button.
 It looks like the connect fails, but the second step, the send, returns status code 200;
 still, nothing happens on the web page.

4. See error
```python
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: Started reloader process [366952]
email-validator not installed, email fields will be treated as str.
To install, run: pip install email-validator
INFO: Started server process [366957]
INFO: Waiting for application startup.
DEBUG: None - ASGI [1] Started
DEBUG: None - ASGI [1] Sent {'type': 'lifespan.startup'}
DEBUG: None - ASGI [1] Received {'type': 'lifespan.startup.complete'}
DEBUG: ('127.0.0.1', 50056) - Connected
DEBUG: server - state = CONNECTING
DEBUG: server - event = connection_made(<TCPTransport closed=False reading=True 0x1819178>)
DEBUG: ('127.0.0.1', 50056) - ASGI [2] Started
DEBUG: ('127.0.0.1', 50056) - ASGI [2] Received {'type': 'websocket.close', 'code': 1008}
INFO: ('127.0.0.1', 50056) - "WebSocket /items/foo/ws" 403
DEBUG: ('127.0.0.1', 50056) - ASGI [2] Raised exception
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 147, in run_asgi
result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/uvicorn/middleware/message_logger.py", line 58, in __call__
raise exc from None
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/uvicorn/middleware/message_logger.py", line 54, in __call__
await self.app(scope, inner_receive, inner_send)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/applications.py", line 133, in __call__
await self.error_middleware(scope, receive, send)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/middleware/errors.py", line 87, in __call__
await self.app(scope, receive, send)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/exceptions.py", line 49, in __call__
await self.app(scope, receive, send)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/routing.py", line 585, in __call__
await route(scope, receive, send)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/routing.py", line 265, in __call__
await self.app(scope, receive, send)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/routing.py", line 56, in app
await func(session)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/fastapi/routing.py", line 148, in app
await websocket.close(code=WS_1008_POLICY_VIOLATION)
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/websockets.py", line 121, in close
await self.send({"type": "websocket.close", "code": code})
File "/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/websockets.py", line 70, in send
raise RuntimeError('Cannot call "send" once a close message has been sent.')
RuntimeError: Cannot call "send" once a close message has been sent.
DEBUG: server ! failing WebSocket connection in the CONNECTING state: 1006 [no reason]
DEBUG: ('127.0.0.1', 50058) - Connected
DEBUG: server x half-closing TCP connection
DEBUG: ('127.0.0.1', 50058) - ASGI [3] Started
DEBUG: ('127.0.0.1', 50058) - ASGI [3] Received {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: ('127.0.0.1', 50058) - "GET / HTTP/1.1" 200
DEBUG: ('127.0.0.1', 50058) - ASGI [3] Received {'type': 'http.response.body', 'body': '<1419 bytes>'}
DEBUG: ('127.0.0.1', 50058) - ASGI [3] Completed
DEBUG: server - event = eof_received()
DEBUG: server - event = connection_lost(None)
DEBUG: server - state = CLOSED
DEBUG: server x code = 1006, reason = [no reason]
DEBUG: ('127.0.0.1', 50058) - Disconnected
DEBUG: ('127.0.0.1', 50060) - Connected
DEBUG: ('127.0.0.1', 50060) - ASGI [4] Started
DEBUG: ('127.0.0.1', 50060) - ASGI [4] Received {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}
INFO: ('127.0.0.1', 50060) - "GET / HTTP/1.1" 200
DEBUG: ('127.0.0.1', 50060) - ASGI [4] Received {'type': 'http.response.body', 'body': '<1419 bytes>'}
DEBUG: ('127.0.0.1', 50060) - ASGI [4] Completed
DEBUG: ('127.0.0.1', 50060) - Disconnected
```
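The traceback suggests the handshake is rejected before it is ever accepted: the page's JavaScript sends neither a `session` cookie nor an `X-Client` header, so `get_cookie_or_client()` closes the connection with policy-violation code 1008, and the later close attempt on the already-closed socket raises the `RuntimeError` (hence the 403 on `/items/foo/ws`; note also that the form's default Item ID `foo` is not a valid value for `item_id: int`). As a rough way to confirm the endpoint itself works when the header is supplied, one could use Starlette's test client; treat the exact `websocket_connect()` keyword arguments as an assumption:

```python
# Sketch only: connect with an X-Client header so get_cookie_or_client() does
# not reject the handshake. Assumes TestClient.websocket_connect(headers=...)
# is supported by the installed Starlette version.
from starlette.testclient import TestClient

client = TestClient(app)
with client.websocket_connect("/items/2/ws", headers={"X-Client": "xyz"}) as ws:
    ws.send_text("hi")
    print(ws.receive_text())  # Session Cookie or X-Client Header value is: xyz
    print(ws.receive_text())  # Message text was: hi, for item ID: 2
```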
**Expected behavior**
Expected the message that was sent to appear on the web page.
**Environment:**
- OS: centos 7
- FastAPI Version [e.g. 0.3.0], get it with: fastapi==0.31.0
```Python
import fastapi
print(fastapi.__version__)
0.31.0
```
- Python version, get it with:
```bash
python --version
Python 3.7.3
```
</issue>
<code>
[start of docs_src/websockets/tutorial002.py]
1 from fastapi import Cookie, Depends, FastAPI, Header, WebSocket, status
2 from fastapi.responses import HTMLResponse
3
4 app = FastAPI()
5
6 html = """
7 <!DOCTYPE html>
8 <html>
9 <head>
10 <title>Chat</title>
11 </head>
12 <body>
13 <h1>WebSocket Chat</h1>
14 <form action="" onsubmit="sendMessage(event)">
15 <label>Item ID: <input type="text" id="itemId" autocomplete="off" value="foo"/></label>
16 <button onclick="connect(event)">Connect</button>
17 <br>
18 <label>Message: <input type="text" id="messageText" autocomplete="off"/></label>
19 <button>Send</button>
20 </form>
21 <ul id='messages'>
22 </ul>
23 <script>
24 var ws = null;
25 function connect(event) {
26 var input = document.getElementById("itemId")
27 ws = new WebSocket("ws://localhost:8000/items/" + input.value + "/ws");
28 ws.onmessage = function(event) {
29 var messages = document.getElementById('messages')
30 var message = document.createElement('li')
31 var content = document.createTextNode(event.data)
32 message.appendChild(content)
33 messages.appendChild(message)
34 };
35 }
36 function sendMessage(event) {
37 var input = document.getElementById("messageText")
38 ws.send(input.value)
39 input.value = ''
40 event.preventDefault()
41 }
42 </script>
43 </body>
44 </html>
45 """
46
47
48 @app.get("/")
49 async def get():
50 return HTMLResponse(html)
51
52
53 async def get_cookie_or_client(
54 websocket: WebSocket, session: str = Cookie(None), x_client: str = Header(None)
55 ):
56 if session is None and x_client is None:
57 await websocket.close(code=status.WS_1008_POLICY_VIOLATION)
58 return session or x_client
59
60
61 @app.websocket("/items/{item_id}/ws")
62 async def websocket_endpoint(
63 websocket: WebSocket,
64 item_id: int,
65 q: str = None,
66 cookie_or_client: str = Depends(get_cookie_or_client),
67 ):
68 await websocket.accept()
69 while True:
70 data = await websocket.receive_text()
71 await websocket.send_text(
72 f"Session Cookie or X-Client Header value is: {cookie_or_client}"
73 )
74 if q is not None:
75 await websocket.send_text(f"Query parameter q is: {q}")
76 await websocket.send_text(f"Message text was: {data}, for item ID: {item_id}")
77
[end of docs_src/websockets/tutorial002.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs_src/websockets/tutorial002.py b/docs_src/websockets/tutorial002.py
--- a/docs_src/websockets/tutorial002.py
+++ b/docs_src/websockets/tutorial002.py
@@ -1,4 +1,4 @@
-from fastapi import Cookie, Depends, FastAPI, Header, WebSocket, status
+from fastapi import Cookie, Depends, FastAPI, Query, WebSocket, status
from fastapi.responses import HTMLResponse
app = FastAPI()
@@ -13,8 +13,9 @@
<h1>WebSocket Chat</h1>
<form action="" onsubmit="sendMessage(event)">
<label>Item ID: <input type="text" id="itemId" autocomplete="off" value="foo"/></label>
+ <label>Token: <input type="text" id="token" autocomplete="off" value="some-key-token"/></label>
<button onclick="connect(event)">Connect</button>
- <br>
+ <hr>
<label>Message: <input type="text" id="messageText" autocomplete="off"/></label>
<button>Send</button>
</form>
@@ -23,8 +24,9 @@
<script>
var ws = null;
function connect(event) {
- var input = document.getElementById("itemId")
- ws = new WebSocket("ws://localhost:8000/items/" + input.value + "/ws");
+ var itemId = document.getElementById("itemId")
+ var token = document.getElementById("token")
+ ws = new WebSocket("ws://localhost:8000/items/" + itemId.value + "/ws?token=" + token.value);
ws.onmessage = function(event) {
var messages = document.getElementById('messages')
var message = document.createElement('li')
@@ -32,6 +34,7 @@
message.appendChild(content)
messages.appendChild(message)
};
+ event.preventDefault()
}
function sendMessage(event) {
var input = document.getElementById("messageText")
@@ -50,26 +53,26 @@
return HTMLResponse(html)
-async def get_cookie_or_client(
- websocket: WebSocket, session: str = Cookie(None), x_client: str = Header(None)
+async def get_cookie_or_token(
+ websocket: WebSocket, session: str = Cookie(None), token: str = Query(None)
):
- if session is None and x_client is None:
+ if session is None and token is None:
await websocket.close(code=status.WS_1008_POLICY_VIOLATION)
- return session or x_client
+ return session or token
@app.websocket("/items/{item_id}/ws")
async def websocket_endpoint(
websocket: WebSocket,
- item_id: int,
- q: str = None,
- cookie_or_client: str = Depends(get_cookie_or_client),
+ item_id: str,
+ q: int = None,
+ cookie_or_token: str = Depends(get_cookie_or_token),
):
await websocket.accept()
while True:
data = await websocket.receive_text()
await websocket.send_text(
- f"Session Cookie or X-Client Header value is: {cookie_or_client}"
+ f"Session cookie or query token value is: {cookie_or_token}"
)
if q is not None:
await websocket.send_text(f"Query parameter q is: {q}")
|
{"golden_diff": "diff --git a/docs_src/websockets/tutorial002.py b/docs_src/websockets/tutorial002.py\n--- a/docs_src/websockets/tutorial002.py\n+++ b/docs_src/websockets/tutorial002.py\n@@ -1,4 +1,4 @@\n-from fastapi import Cookie, Depends, FastAPI, Header, WebSocket, status\n+from fastapi import Cookie, Depends, FastAPI, Query, WebSocket, status\n from fastapi.responses import HTMLResponse\n \n app = FastAPI()\n@@ -13,8 +13,9 @@\n <h1>WebSocket Chat</h1>\n <form action=\"\" onsubmit=\"sendMessage(event)\">\n <label>Item ID: <input type=\"text\" id=\"itemId\" autocomplete=\"off\" value=\"foo\"/></label>\n+ <label>Token: <input type=\"text\" id=\"token\" autocomplete=\"off\" value=\"some-key-token\"/></label>\n <button onclick=\"connect(event)\">Connect</button>\n- <br>\n+ <hr>\n <label>Message: <input type=\"text\" id=\"messageText\" autocomplete=\"off\"/></label>\n <button>Send</button>\n </form>\n@@ -23,8 +24,9 @@\n <script>\n var ws = null;\n function connect(event) {\n- var input = document.getElementById(\"itemId\")\n- ws = new WebSocket(\"ws://localhost:8000/items/\" + input.value + \"/ws\");\n+ var itemId = document.getElementById(\"itemId\")\n+ var token = document.getElementById(\"token\")\n+ ws = new WebSocket(\"ws://localhost:8000/items/\" + itemId.value + \"/ws?token=\" + token.value);\n ws.onmessage = function(event) {\n var messages = document.getElementById('messages')\n var message = document.createElement('li')\n@@ -32,6 +34,7 @@\n message.appendChild(content)\n messages.appendChild(message)\n };\n+ event.preventDefault()\n }\n function sendMessage(event) {\n var input = document.getElementById(\"messageText\")\n@@ -50,26 +53,26 @@\n return HTMLResponse(html)\n \n \n-async def get_cookie_or_client(\n- websocket: WebSocket, session: str = Cookie(None), x_client: str = Header(None)\n+async def get_cookie_or_token(\n+ websocket: WebSocket, session: str = Cookie(None), token: str = Query(None)\n ):\n- if session is None and x_client is None:\n+ if session is None and token is None:\n await websocket.close(code=status.WS_1008_POLICY_VIOLATION)\n- return session or x_client\n+ return session or token\n \n \n @app.websocket(\"/items/{item_id}/ws\")\n async def websocket_endpoint(\n websocket: WebSocket,\n- item_id: int,\n- q: str = None,\n- cookie_or_client: str = Depends(get_cookie_or_client),\n+ item_id: str,\n+ q: int = None,\n+ cookie_or_token: str = Depends(get_cookie_or_token),\n ):\n await websocket.accept()\n while True:\n data = await websocket.receive_text()\n await websocket.send_text(\n- f\"Session Cookie or X-Client Header value is: {cookie_or_client}\"\n+ f\"Session cookie or query token value is: {cookie_or_token}\"\n )\n if q is not None:\n await websocket.send_text(f\"Query parameter q is: {q}\")\n", "issue": "Tutorial websocket doc example\n**Describe the bug**\r\nHi, \r\nOn the docs of websocket the last example doesn't work.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
Create a file main.py with the last example on the bottom of the file\r\n\r\n>https://fastapi.tiangolo.com/tutorial/websockets/#create-a-websocket\r\n```python\r\nfrom fastapi import Cookie, Depends, FastAPI, Header\r\nfrom starlette.responses import HTMLResponse\r\nfrom starlette.status import WS_1008_POLICY_VIOLATION\r\nfrom starlette.websockets import WebSocket\r\n\r\napp = FastAPI()\r\n\r\nhtml = \"\"\"\r\n<!DOCTYPE html>\r\n<html>\r\n <head>\r\n <title>Chat</title>\r\n </head>\r\n <body>\r\n <h1>WebSocket Chat</h1>\r\n <form action=\"\" onsubmit=\"sendMessage(event)\">\r\n <label>Item ID: <input type=\"text\" id=\"itemId\" autocomplete=\"off\" value=\"foo\"/></label>\r\n <button onclick=\"connect(event)\">Connect</button>\r\n <br>\r\n <label>Message: <input type=\"text\" id=\"messageText\" autocomplete=\"off\"/></label>\r\n <button>Send</button>\r\n </form>\r\n <ul id='messages'>\r\n </ul>\r\n <script>\r\n var ws = null;\r\n function connect(event) {\r\n var input = document.getElementById(\"itemId\")\r\n ws = new WebSocket(\"ws://localhost:8000/items/\" + input.value + \"/ws\");\r\n ws.onmessage = function(event) {\r\n var messages = document.getElementById('messages')\r\n var message = document.createElement('li')\r\n var content = document.createTextNode(event.data)\r\n message.appendChild(content)\r\n messages.appendChild(message)\r\n };\r\n }\r\n function sendMessage(event) {\r\n var input = document.getElementById(\"messageText\")\r\n ws.send(input.value)\r\n input.value = ''\r\n event.preventDefault()\r\n }\r\n </script>\r\n </body>\r\n</html>\r\n\"\"\"\r\n\r\n\r\[email protected](\"/\")\r\nasync def get():\r\n return HTMLResponse(html)\r\n\r\n\r\nasync def get_cookie_or_client(\r\n websocket: WebSocket, session: str = Cookie(None), x_client: str = Header(None)\r\n):\r\n if session is None and x_client is None:\r\n await websocket.close(code=WS_1008_POLICY_VIOLATION)\r\n return session or x_client\r\n\r\n\r\[email protected](\"/items/{item_id}/ws\")\r\nasync def websocket_endpoint(\r\n websocket: WebSocket,\r\n item_id: int,\r\n q: str = None,\r\n cookie_or_client: str = Depends(get_cookie_or_client),\r\n):\r\n await websocket.accept()\r\n while True:\r\n data = await websocket.receive_text()\r\n await websocket.send_text(\r\n f\"Session Cookie or X-Client Header value is: {cookie_or_client}\"\r\n )\r\n if q is not None:\r\n await websocket.send_text(f\"Query parameter q is: {q}\")\r\n await websocket.send_text(f\"Message text was: {data}, for item ID: {item_id}\")\r\n``` \r\n2. Run the application with the cmd:\r\n```\r\nuvicorn main:app --log-level debug --reload\r\n```\r\n3. Open the browser 127.0.0.01\r\n - the first time i connect with ItemID foo , press the button connect \r\n - send message hi with ItemID foo and press the button send.\r\n it's look like the connect fail but the second ,but the send have return code 200\r\n but nothing happen on the web side. \r\n\r\n\r\n\r\n4. 
See error\r\n```python\r\n\r\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\r\nINFO: Started reloader process [366952]\r\nemail-validator not installed, email fields will be treated as str.\r\nTo install, run: pip install email-validator\r\nINFO: Started server process [366957]\r\nINFO: Waiting for application startup.\r\nDEBUG: None - ASGI [1] Started\r\nDEBUG: None - ASGI [1] Sent {'type': 'lifespan.startup'}\r\nDEBUG: None - ASGI [1] Received {'type': 'lifespan.startup.complete'}\r\nDEBUG: ('127.0.0.1', 50056) - Connected\r\nDEBUG: server - state = CONNECTING\r\nDEBUG: server - event = connection_made(<TCPTransport closed=False reading=True 0x1819178>)\r\nDEBUG: ('127.0.0.1', 50056) - ASGI [2] Started\r\nDEBUG: ('127.0.0.1', 50056) - ASGI [2] Received {'type': 'websocket.close', 'code': 1008}\r\nINFO: ('127.0.0.1', 50056) - \"WebSocket /items/foo/ws\" 403\r\nDEBUG: ('127.0.0.1', 50056) - ASGI [2] Raised exception\r\nERROR: Exception in ASGI application\r\nTraceback (most recent call last):\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py\", line 147, in run_asgi\r\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/uvicorn/middleware/message_logger.py\", line 58, in __call__\r\n raise exc from None\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/uvicorn/middleware/message_logger.py\", line 54, in __call__\r\n await self.app(scope, inner_receive, inner_send)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/applications.py\", line 133, in __call__\r\n await self.error_middleware(scope, receive, send)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/middleware/errors.py\", line 87, in __call__\r\n await self.app(scope, receive, send)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/exceptions.py\", line 49, in __call__\r\n await self.app(scope, receive, send)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/routing.py\", line 585, in __call__\r\n await route(scope, receive, send)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/routing.py\", line 265, in __call__\r\n await self.app(scope, receive, send)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/routing.py\", line 56, in app\r\n await func(session)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/fastapi/routing.py\", line 148, in app\r\n await websocket.close(code=WS_1008_POLICY_VIOLATION)\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/websockets.py\", line 121, in close\r\n await self.send({\"type\": \"websocket.close\", \"code\": code})\r\n File \"/data/experiments/realtime_web_socket/lib/python3.7/site-packages/starlette/websockets.py\", line 70, in send\r\n raise RuntimeError('Cannot call \"send\" once a close message has been sent.')\r\nRuntimeError: Cannot call \"send\" once a close message has been sent.\r\nDEBUG: server ! 
failing WebSocket connection in the CONNECTING state: 1006 [no reason]\r\nDEBUG: ('127.0.0.1', 50058) - Connected\r\nDEBUG: server x half-closing TCP connection\r\nDEBUG: ('127.0.0.1', 50058) - ASGI [3] Started\r\nDEBUG: ('127.0.0.1', 50058) - ASGI [3] Received {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}\r\nINFO: ('127.0.0.1', 50058) - \"GET / HTTP/1.1\" 200\r\nDEBUG: ('127.0.0.1', 50058) - ASGI [3] Received {'type': 'http.response.body', 'body': '<1419 bytes>'}\r\nDEBUG: ('127.0.0.1', 50058) - ASGI [3] Completed\r\nDEBUG: server - event = eof_received()\r\nDEBUG: server - event = connection_lost(None)\r\nDEBUG: server - state = CLOSED\r\nDEBUG: server x code = 1006, reason = [no reason]\r\nDEBUG: ('127.0.0.1', 50058) - Disconnected\r\nDEBUG: ('127.0.0.1', 50060) - Connected\r\nDEBUG: ('127.0.0.1', 50060) - ASGI [4] Started\r\nDEBUG: ('127.0.0.1', 50060) - ASGI [4] Received {'type': 'http.response.start', 'status': 200, 'headers': '<...>'}\r\nINFO: ('127.0.0.1', 50060) - \"GET / HTTP/1.1\" 200\r\nDEBUG: ('127.0.0.1', 50060) - ASGI [4] Received {'type': 'http.response.body', 'body': '<1419 bytes>'}\r\nDEBUG: ('127.0.0.1', 50060) - ASGI [4] Completed\r\nDEBUG: ('127.0.0.1', 50060) - Disconnected\r\n\r\n\r\n```\r\n**Expected behavior**\r\nexpected to appear the send bold message on the web page.\r\n\r\n**Environment:**\r\n - OS: centos 7\r\n - FastAPI Version [e.g. 0.3.0], get it with: fastapi==0.31.0\r\n\r\n```Python\r\nimport fastapi\r\nprint(fastapi.__version__)\r\n0.31.0\r\n```\r\n\r\n- Python version, get it with:\r\n\r\n```bash\r\npython --version\r\nPython 3.7.3\r\n```\r\n\r\n\n", "before_files": [{"content": "from fastapi import Cookie, Depends, FastAPI, Header, WebSocket, status\nfrom fastapi.responses import HTMLResponse\n\napp = FastAPI()\n\nhtml = \"\"\"\n<!DOCTYPE html>\n<html>\n <head>\n <title>Chat</title>\n </head>\n <body>\n <h1>WebSocket Chat</h1>\n <form action=\"\" onsubmit=\"sendMessage(event)\">\n <label>Item ID: <input type=\"text\" id=\"itemId\" autocomplete=\"off\" value=\"foo\"/></label>\n <button onclick=\"connect(event)\">Connect</button>\n <br>\n <label>Message: <input type=\"text\" id=\"messageText\" autocomplete=\"off\"/></label>\n <button>Send</button>\n </form>\n <ul id='messages'>\n </ul>\n <script>\n var ws = null;\n function connect(event) {\n var input = document.getElementById(\"itemId\")\n ws = new WebSocket(\"ws://localhost:8000/items/\" + input.value + \"/ws\");\n ws.onmessage = function(event) {\n var messages = document.getElementById('messages')\n var message = document.createElement('li')\n var content = document.createTextNode(event.data)\n message.appendChild(content)\n messages.appendChild(message)\n };\n }\n function sendMessage(event) {\n var input = document.getElementById(\"messageText\")\n ws.send(input.value)\n input.value = ''\n event.preventDefault()\n }\n </script>\n </body>\n</html>\n\"\"\"\n\n\[email protected](\"/\")\nasync def get():\n return HTMLResponse(html)\n\n\nasync def get_cookie_or_client(\n websocket: WebSocket, session: str = Cookie(None), x_client: str = Header(None)\n):\n if session is None and x_client is None:\n await websocket.close(code=status.WS_1008_POLICY_VIOLATION)\n return session or x_client\n\n\[email protected](\"/items/{item_id}/ws\")\nasync def websocket_endpoint(\n websocket: WebSocket,\n item_id: int,\n q: str = None,\n cookie_or_client: str = Depends(get_cookie_or_client),\n):\n await websocket.accept()\n while True:\n data = await websocket.receive_text()\n await 
websocket.send_text(\n f\"Session Cookie or X-Client Header value is: {cookie_or_client}\"\n )\n if q is not None:\n await websocket.send_text(f\"Query parameter q is: {q}\")\n await websocket.send_text(f\"Message text was: {data}, for item ID: {item_id}\")\n", "path": "docs_src/websockets/tutorial002.py"}]}
| 3,611 | 739 |
gh_patches_debug_19507
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-269
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Config to capture query string, not just path
When a request comes in, we capture `request.path`, which doesn't appear to capture any query string.
We should match the Ruby agent:
* Default to capturing the entire path and query string
* Config to not capture query string https://docs.scoutapm.com/#uri_reporting
</issue>
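For illustration, here is a minimal sketch of how the `create_filtered_path` helper shown below could honour such a setting. The `uri_reporting` name and its values mirror the Ruby agent option linked above; how the value is threaded through the agent's config layers is left out, so treat the keyword argument as an assumption rather than the actual implementation:

```python
# Sketch only: gate query-string capture behind a uri_reporting setting.
# "filtered_params" (default) keeps the scrubbed query string; "path" drops it.
def create_filtered_path(path, query_params, uri_reporting="filtered_params"):
    if uri_reporting == "path":
        return path  # config opted out of query-string capture
    filtered_params = sorted(
        (k, "[FILTERED]" if k.lower() in FILTER_PARAMETERS else v)
        for k, v in query_params
    )
    if not filtered_params:
        return path
    return path + "?" + urlencode(filtered_params)
```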
<code>
[start of src/scout_apm/core/web_requests.py]
1 # coding=utf-8
2 from scout_apm.compat import urlencode
3
4 # Originally derived from:
5 # 1. Rails:
6 # https://github.com/rails/rails/blob/0196551e6039ca864d1eee1e01819fcae12c1dc9/railties/lib/rails/generators/rails/app/templates/config/initializers/filter_parameter_logging.rb.tt
7 # 2. Sentry server side scrubbing:
8 # https://docs.sentry.io/data-management/sensitive-data/#server-side-scrubbing
9 FILTER_PARAMETERS = frozenset(
10 [
11 "access",
12 "access_token",
13 "api_key",
14 "apikey",
15 "auth",
16 "auth_token",
17 "card[number]",
18 "certificate",
19 "credentials",
20 "crypt",
21 "key",
22 "mysql_pwd",
23 "otp",
24 "passwd",
25 "password",
26 "private",
27 "protected",
28 "salt",
29 "secret",
30 "ssn",
31 "stripetoken",
32 "token",
33 ]
34 )
35
36
37 def create_filtered_path(path, query_params):
38 filtered_params = sorted(
39 (
40 (k, "[FILTERED]" if k.lower() in FILTER_PARAMETERS else v)
41 for k, v in query_params
42 )
43 )
44 if not filtered_params:
45 return path
46 return path + "?" + urlencode(filtered_params)
47
[end of src/scout_apm/core/web_requests.py]
[start of src/scout_apm/core/config.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5 import os
6
7 from scout_apm.compat import string_type
8 from scout_apm.core import platform_detection
9 from scout_apm.core.util import octal
10
11 logger = logging.getLogger(__name__)
12
13
14 class ScoutConfig(object):
15 """
16 Configuration object for the ScoutApm agent.
17
18 Contains a list of configuration "layers". When a configuration key is
19 looked up, each layer is asked in turn if it knows the value. The first one
20 to answer affirmatively returns the value.
21 """
22
23 def __init__(self):
24 self.layers = [
25 ScoutConfigEnv(),
26 ScoutConfigPython(),
27 ScoutConfigDerived(self),
28 ScoutConfigDefaults(),
29 ScoutConfigNull(),
30 ]
31
32 def value(self, key):
33 value = self.locate_layer_for_key(key).value(key)
34 if key in CONVERSIONS:
35 return CONVERSIONS[key](value)
36 return value
37
38 def locate_layer_for_key(self, key):
39 for layer in self.layers:
40 if layer.has_config(key):
41 return layer
42
43 # Should be unreachable because ScoutConfigNull returns None for all
44 # keys.
45 raise ValueError("key {!r} not found in any layer".format(key))
46
47 def log(self):
48 logger.debug("Configuration Loaded:")
49 for key in self.known_keys():
50 layer = self.locate_layer_for_key(key)
51 logger.debug("%-9s: %s = %s", layer.name(), key, layer.value(key))
52
53 def known_keys(self):
54 return [
55 "app_server",
56 "application_root",
57 "core_agent_dir",
58 "core_agent_download",
59 "core_agent_launch",
60 "core_agent_permissions",
61 "core_agent_version",
62 "disabled_instruments",
63 "download_url",
64 "framework",
65 "framework_version",
66 "hostname",
67 "ignore",
68 "key",
69 "log_level",
70 "monitor",
71 "name",
72 "revision_sha",
73 "scm_subdirectory",
74 "socket_path",
75 ]
76
77 def core_agent_permissions(self):
78 try:
79 return octal(self.value("core_agent_permissions"))
80 except ValueError:
81 logger.exception(
82 "Invalid core_agent_permissions value, using default of 0o700"
83 )
84 return 0o700
85
86 @classmethod
87 def set(cls, **kwargs):
88 """
89 Sets a configuration value for the Scout agent. Values set here will
90 not override values set in ENV.
91 """
92 global SCOUT_PYTHON_VALUES
93 for key, value in kwargs.items():
94 SCOUT_PYTHON_VALUES[key] = value
95
96 @classmethod
97 def unset(cls, *keys):
98 """
99 Removes a configuration value for the Scout agent.
100 """
101 global SCOUT_PYTHON_VALUES
102 for key in keys:
103 SCOUT_PYTHON_VALUES.pop(key, None)
104
105 @classmethod
106 def reset_all(cls):
107 """
108 Remove all configuration settings set via `ScoutConfig.set(...)`.
109
110 This is meant for use in testing.
111 """
112 global SCOUT_PYTHON_VALUES
113 SCOUT_PYTHON_VALUES.clear()
114
115
116 # Module-level data, the ScoutConfig.set(key="value") adds to this
117 SCOUT_PYTHON_VALUES = {}
118
119
120 class ScoutConfigPython(object):
121 """
122 A configuration overlay that lets other parts of python set values.
123 """
124
125 def name(self):
126 return "Python"
127
128 def has_config(self, key):
129 return key in SCOUT_PYTHON_VALUES
130
131 def value(self, key):
132 return SCOUT_PYTHON_VALUES[key]
133
134
135 class ScoutConfigEnv(object):
136 """
137 Reads configuration from environment by prefixing the key
138 requested with "SCOUT_"
139
140 Example: the `log_level` config looks for SCOUT_LOG_LEVEL
141 environment variable
142 """
143
144 def name(self):
145 return "ENV"
146
147 def has_config(self, key):
148 env_key = self.modify_key(key)
149 return env_key in os.environ
150
151 def value(self, key):
152 env_key = self.modify_key(key)
153 return os.environ[env_key]
154
155 def modify_key(self, key):
156 env_key = ("SCOUT_" + key).upper()
157 return env_key
158
159
160 class ScoutConfigDerived(object):
161 """
162 A configuration overlay that calculates from other values.
163 """
164
165 def __init__(self, config):
166 """
167 config argument is the overall ScoutConfig var, so we can lookup the
168 components of the derived info.
169 """
170 self.config = config
171
172 def name(self):
173 return "Derived"
174
175 def has_config(self, key):
176 return self.lookup_func(key) is not None
177
178 def value(self, key):
179 return self.lookup_func(key)()
180
181 def lookup_func(self, key):
182 """
183 Returns the derive_#{key} function, or None if it isn't defined
184 """
185 func_name = "derive_" + key
186 return getattr(self, func_name, None)
187
188 def derive_socket_path(self):
189 return "{}/{}/scout-agent.sock".format(
190 self.config.value("core_agent_dir"),
191 self.config.value("core_agent_full_name"),
192 )
193
194 def derive_core_agent_full_name(self):
195 triple = self.config.value("core_agent_triple")
196 if not platform_detection.is_valid_triple(triple):
197 logger.warning("Invalid value for core_agent_triple: %s", triple)
198 return "{name}-{version}-{triple}".format(
199 name="scout_apm_core",
200 version=self.config.value("core_agent_version"),
201 triple=triple,
202 )
203
204 def derive_core_agent_triple(self):
205 return platform_detection.get_triple()
206
207
208 class ScoutConfigDefaults(object):
209 """
210 Provides default values for important configurations
211 """
212
213 def name(self):
214 return "Defaults"
215
216 def __init__(self):
217 self.defaults = {
218 "app_server": "",
219 "application_root": "",
220 "core_agent_dir": "/tmp/scout_apm_core",
221 "core_agent_download": True,
222 "core_agent_launch": True,
223 "core_agent_permissions": 700,
224 "core_agent_version": "v1.2.0", # can be an exact tag name, or 'latest'
225 "disabled_instruments": [],
226 "download_url": "https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release", # noqa: E501
227 "framework": "",
228 "framework_version": "",
229 "hostname": None,
230 "key": "",
231 "log_level": "info",
232 "monitor": False,
233 "name": "",
234 "revision_sha": self._git_revision_sha(),
235 "scm_subdirectory": "",
236 }
237
238 def _git_revision_sha(self):
239 # N.B. The environment variable SCOUT_REVISION_SHA may also be used,
240 # but that will be picked up by ScoutConfigEnv
241 return os.environ.get("HEROKU_SLUG_COMMIT", "")
242
243 def has_config(self, key):
244 return key in self.defaults
245
246 def value(self, key):
247 return self.defaults[key]
248
249
250 # Always returns None to any key
251 class ScoutConfigNull(object):
252 """
253 Always answers that a key is present, but the value is None
254
255 Used as the last step of the layered configuration.
256 """
257
258 def name(self):
259 return "Null"
260
261 def has_config(self, key):
262 return True
263
264 def value(self, key):
265 return None
266
267
268 def convert_to_bool(value):
269 if isinstance(value, bool):
270 return value
271 if isinstance(value, string_type):
272 return value.lower() in ("yes", "true", "t", "1")
273 # Unknown type - default to false?
274 return False
275
276
277 def convert_to_list(value):
278 if isinstance(value, list):
279 return value
280 if isinstance(value, tuple):
281 return list(value)
282 if isinstance(value, string_type):
283 # Split on commas
284 return [item.strip() for item in value.split(",") if item]
285 # Unknown type - default to empty?
286 return []
287
288
289 CONVERSIONS = {
290 "core_agent_download": convert_to_bool,
291 "core_agent_launch": convert_to_bool,
292 "monitor": convert_to_bool,
293 "disabled_instruments": convert_to_list,
294 "ignore": convert_to_list,
295 }
296
[end of src/scout_apm/core/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py
--- a/src/scout_apm/core/config.py
+++ b/src/scout_apm/core/config.py
@@ -233,6 +233,7 @@
"name": "",
"revision_sha": self._git_revision_sha(),
"scm_subdirectory": "",
+ "uri_reporting": "filtered_params",
}
def _git_revision_sha(self):
diff --git a/src/scout_apm/core/web_requests.py b/src/scout_apm/core/web_requests.py
--- a/src/scout_apm/core/web_requests.py
+++ b/src/scout_apm/core/web_requests.py
@@ -1,5 +1,6 @@
# coding=utf-8
from scout_apm.compat import urlencode
+from scout_apm.core.context import AgentContext
# Originally derived from:
# 1. Rails:
@@ -35,6 +36,8 @@
def create_filtered_path(path, query_params):
+ if AgentContext.instance.config.value("uri_reporting") == "path":
+ return path
filtered_params = sorted(
(
(k, "[FILTERED]" if k.lower() in FILTER_PARAMETERS else v)
|
{"golden_diff": "diff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py\n--- a/src/scout_apm/core/config.py\n+++ b/src/scout_apm/core/config.py\n@@ -233,6 +233,7 @@\n \"name\": \"\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n+ \"uri_reporting\": \"filtered_params\",\n }\n \n def _git_revision_sha(self):\ndiff --git a/src/scout_apm/core/web_requests.py b/src/scout_apm/core/web_requests.py\n--- a/src/scout_apm/core/web_requests.py\n+++ b/src/scout_apm/core/web_requests.py\n@@ -1,5 +1,6 @@\n # coding=utf-8\n from scout_apm.compat import urlencode\n+from scout_apm.core.context import AgentContext\n \n # Originally derived from:\n # 1. Rails:\n@@ -35,6 +36,8 @@\n \n \n def create_filtered_path(path, query_params):\n+ if AgentContext.instance.config.value(\"uri_reporting\") == \"path\":\n+ return path\n filtered_params = sorted(\n (\n (k, \"[FILTERED]\" if k.lower() in FILTER_PARAMETERS else v)\n", "issue": "Config to capture query string, not just path\nWhen a request comes in, we capture `request.path`, which doesn't appear to capture any query string.\r\n\r\nWe should match the Ruby agent:\r\n* Default to capturing the entire path and query string\r\n* Config to not capture query string https://docs.scoutapm.com/#uri_reporting\n", "before_files": [{"content": "# coding=utf-8\nfrom scout_apm.compat import urlencode\n\n# Originally derived from:\n# 1. Rails:\n# https://github.com/rails/rails/blob/0196551e6039ca864d1eee1e01819fcae12c1dc9/railties/lib/rails/generators/rails/app/templates/config/initializers/filter_parameter_logging.rb.tt\n# 2. Sentry server side scrubbing:\n# https://docs.sentry.io/data-management/sensitive-data/#server-side-scrubbing\nFILTER_PARAMETERS = frozenset(\n [\n \"access\",\n \"access_token\",\n \"api_key\",\n \"apikey\",\n \"auth\",\n \"auth_token\",\n \"card[number]\",\n \"certificate\",\n \"credentials\",\n \"crypt\",\n \"key\",\n \"mysql_pwd\",\n \"otp\",\n \"passwd\",\n \"password\",\n \"private\",\n \"protected\",\n \"salt\",\n \"secret\",\n \"ssn\",\n \"stripetoken\",\n \"token\",\n ]\n)\n\n\ndef create_filtered_path(path, query_params):\n filtered_params = sorted(\n (\n (k, \"[FILTERED]\" if k.lower() in FILTER_PARAMETERS else v)\n for k, v in query_params\n )\n )\n if not filtered_params:\n return path\n return path + \"?\" + urlencode(filtered_params)\n", "path": "src/scout_apm/core/web_requests.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\n\nfrom scout_apm.compat import string_type\nfrom scout_apm.core import platform_detection\nfrom scout_apm.core.util import octal\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScoutConfig(object):\n \"\"\"\n Configuration object for the ScoutApm agent.\n\n Contains a list of configuration \"layers\". When a configuration key is\n looked up, each layer is asked in turn if it knows the value. 
The first one\n to answer affirmatively returns the value.\n \"\"\"\n\n def __init__(self):\n self.layers = [\n ScoutConfigEnv(),\n ScoutConfigPython(),\n ScoutConfigDerived(self),\n ScoutConfigDefaults(),\n ScoutConfigNull(),\n ]\n\n def value(self, key):\n value = self.locate_layer_for_key(key).value(key)\n if key in CONVERSIONS:\n return CONVERSIONS[key](value)\n return value\n\n def locate_layer_for_key(self, key):\n for layer in self.layers:\n if layer.has_config(key):\n return layer\n\n # Should be unreachable because ScoutConfigNull returns None for all\n # keys.\n raise ValueError(\"key {!r} not found in any layer\".format(key))\n\n def log(self):\n logger.debug(\"Configuration Loaded:\")\n for key in self.known_keys():\n layer = self.locate_layer_for_key(key)\n logger.debug(\"%-9s: %s = %s\", layer.name(), key, layer.value(key))\n\n def known_keys(self):\n return [\n \"app_server\",\n \"application_root\",\n \"core_agent_dir\",\n \"core_agent_download\",\n \"core_agent_launch\",\n \"core_agent_permissions\",\n \"core_agent_version\",\n \"disabled_instruments\",\n \"download_url\",\n \"framework\",\n \"framework_version\",\n \"hostname\",\n \"ignore\",\n \"key\",\n \"log_level\",\n \"monitor\",\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n \"socket_path\",\n ]\n\n def core_agent_permissions(self):\n try:\n return octal(self.value(\"core_agent_permissions\"))\n except ValueError:\n logger.exception(\n \"Invalid core_agent_permissions value, using default of 0o700\"\n )\n return 0o700\n\n @classmethod\n def set(cls, **kwargs):\n \"\"\"\n Sets a configuration value for the Scout agent. Values set here will\n not override values set in ENV.\n \"\"\"\n global SCOUT_PYTHON_VALUES\n for key, value in kwargs.items():\n SCOUT_PYTHON_VALUES[key] = value\n\n @classmethod\n def unset(cls, *keys):\n \"\"\"\n Removes a configuration value for the Scout agent.\n \"\"\"\n global SCOUT_PYTHON_VALUES\n for key in keys:\n SCOUT_PYTHON_VALUES.pop(key, None)\n\n @classmethod\n def reset_all(cls):\n \"\"\"\n Remove all configuration settings set via `ScoutConfig.set(...)`.\n\n This is meant for use in testing.\n \"\"\"\n global SCOUT_PYTHON_VALUES\n SCOUT_PYTHON_VALUES.clear()\n\n\n# Module-level data, the ScoutConfig.set(key=\"value\") adds to this\nSCOUT_PYTHON_VALUES = {}\n\n\nclass ScoutConfigPython(object):\n \"\"\"\n A configuration overlay that lets other parts of python set values.\n \"\"\"\n\n def name(self):\n return \"Python\"\n\n def has_config(self, key):\n return key in SCOUT_PYTHON_VALUES\n\n def value(self, key):\n return SCOUT_PYTHON_VALUES[key]\n\n\nclass ScoutConfigEnv(object):\n \"\"\"\n Reads configuration from environment by prefixing the key\n requested with \"SCOUT_\"\n\n Example: the `log_level` config looks for SCOUT_LOG_LEVEL\n environment variable\n \"\"\"\n\n def name(self):\n return \"ENV\"\n\n def has_config(self, key):\n env_key = self.modify_key(key)\n return env_key in os.environ\n\n def value(self, key):\n env_key = self.modify_key(key)\n return os.environ[env_key]\n\n def modify_key(self, key):\n env_key = (\"SCOUT_\" + key).upper()\n return env_key\n\n\nclass ScoutConfigDerived(object):\n \"\"\"\n A configuration overlay that calculates from other values.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n config argument is the overall ScoutConfig var, so we can lookup the\n components of the derived info.\n \"\"\"\n self.config = config\n\n def name(self):\n return \"Derived\"\n\n def has_config(self, key):\n return self.lookup_func(key) is not None\n\n 
def value(self, key):\n return self.lookup_func(key)()\n\n def lookup_func(self, key):\n \"\"\"\n Returns the derive_#{key} function, or None if it isn't defined\n \"\"\"\n func_name = \"derive_\" + key\n return getattr(self, func_name, None)\n\n def derive_socket_path(self):\n return \"{}/{}/scout-agent.sock\".format(\n self.config.value(\"core_agent_dir\"),\n self.config.value(\"core_agent_full_name\"),\n )\n\n def derive_core_agent_full_name(self):\n triple = self.config.value(\"core_agent_triple\")\n if not platform_detection.is_valid_triple(triple):\n logger.warning(\"Invalid value for core_agent_triple: %s\", triple)\n return \"{name}-{version}-{triple}\".format(\n name=\"scout_apm_core\",\n version=self.config.value(\"core_agent_version\"),\n triple=triple,\n )\n\n def derive_core_agent_triple(self):\n return platform_detection.get_triple()\n\n\nclass ScoutConfigDefaults(object):\n \"\"\"\n Provides default values for important configurations\n \"\"\"\n\n def name(self):\n return \"Defaults\"\n\n def __init__(self):\n self.defaults = {\n \"app_server\": \"\",\n \"application_root\": \"\",\n \"core_agent_dir\": \"/tmp/scout_apm_core\",\n \"core_agent_download\": True,\n \"core_agent_launch\": True,\n \"core_agent_permissions\": 700,\n \"core_agent_version\": \"v1.2.0\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n \"framework_version\": \"\",\n \"hostname\": None,\n \"key\": \"\",\n \"log_level\": \"info\",\n \"monitor\": False,\n \"name\": \"\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n }\n\n def _git_revision_sha(self):\n # N.B. The environment variable SCOUT_REVISION_SHA may also be used,\n # but that will be picked up by ScoutConfigEnv\n return os.environ.get(\"HEROKU_SLUG_COMMIT\", \"\")\n\n def has_config(self, key):\n return key in self.defaults\n\n def value(self, key):\n return self.defaults[key]\n\n\n# Always returns None to any key\nclass ScoutConfigNull(object):\n \"\"\"\n Always answers that a key is present, but the value is None\n\n Used as the last step of the layered configuration.\n \"\"\"\n\n def name(self):\n return \"Null\"\n\n def has_config(self, key):\n return True\n\n def value(self, key):\n return None\n\n\ndef convert_to_bool(value):\n if isinstance(value, bool):\n return value\n if isinstance(value, string_type):\n return value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n # Unknown type - default to false?\n return False\n\n\ndef convert_to_list(value):\n if isinstance(value, list):\n return value\n if isinstance(value, tuple):\n return list(value)\n if isinstance(value, string_type):\n # Split on commas\n return [item.strip() for item in value.split(\",\") if item]\n # Unknown type - default to empty?\n return []\n\n\nCONVERSIONS = {\n \"core_agent_download\": convert_to_bool,\n \"core_agent_launch\": convert_to_bool,\n \"monitor\": convert_to_bool,\n \"disabled_instruments\": convert_to_list,\n \"ignore\": convert_to_list,\n}\n", "path": "src/scout_apm/core/config.py"}]}
| 3,620 | 266 |
gh_patches_debug_20887 | rasdani/github-patches | git_diff | zulip__zulip-19155 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make muting work for deactivated users
Even after a user is deactivated, it may be helpful to hide that user's messages, avatar, etc. We should therefore make it possible to mute deactivated users.
This will also fix a bug where if a user tries to mute a deactivated user, the action silently fails.
</issue>
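To make the requested behavior concrete, here is a minimal, self-contained sketch (not Zulip's actual implementation) of why the mute endpoints need an opt-in flag for deactivated accounts in the user-access helper. The real helper is `access_user_by_id` in `zerver/lib/users.py` and operates on `UserProfile` rows; the `allow_deactivated` name is taken from the accepted patch further down in this record.

```python
from dataclasses import dataclass


class JsonableError(Exception):
    pass


@dataclass
class User:
    id: int
    is_bot: bool = False
    is_active: bool = True


def access_user_by_id(users, target_id, *, allow_bots=False, allow_deactivated=False):
    """Toy stand-in for Zulip's access_user_by_id helper."""
    target = users[target_id]
    if target.is_bot and not allow_bots:
        raise JsonableError("No such user")
    if not target.is_active and not allow_deactivated:
        # Without an escape hatch, muting a deactivated user fails here,
        # which is the silent failure described in the issue.
        raise JsonableError("User is deactivated")
    return target


# The mute/unmute endpoints would pass allow_deactivated=True so that a
# deactivated account can still be muted; other callers keep the default.
users = {7: User(id=7, is_active=False)}
assert access_user_by_id(users, 7, allow_deactivated=True).id == 7
```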
<code>
[start of zerver/views/muting.py]
1 import datetime
2 from typing import Optional
3
4 from django.http import HttpRequest, HttpResponse
5 from django.utils.timezone import now as timezone_now
6 from django.utils.translation import gettext as _
7
8 from zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user
9 from zerver.lib.exceptions import JsonableError
10 from zerver.lib.request import REQ, has_request_variables
11 from zerver.lib.response import json_success
12 from zerver.lib.streams import (
13 access_stream_by_id,
14 access_stream_by_name,
15 access_stream_for_unmute_topic_by_id,
16 access_stream_for_unmute_topic_by_name,
17 check_for_exactly_one_stream_arg,
18 )
19 from zerver.lib.topic_mutes import topic_is_muted
20 from zerver.lib.user_mutes import get_mute_object
21 from zerver.lib.users import access_user_by_id
22 from zerver.lib.validator import check_int
23 from zerver.models import UserProfile
24
25
26 def mute_topic(
27 user_profile: UserProfile,
28 stream_id: Optional[int],
29 stream_name: Optional[str],
30 topic_name: str,
31 date_muted: datetime.datetime,
32 ) -> HttpResponse:
33 if stream_name is not None:
34 (stream, sub) = access_stream_by_name(user_profile, stream_name)
35 else:
36 assert stream_id is not None
37 (stream, sub) = access_stream_by_id(user_profile, stream_id)
38
39 if topic_is_muted(user_profile, stream.id, topic_name):
40 raise JsonableError(_("Topic already muted"))
41
42 do_mute_topic(user_profile, stream, topic_name, date_muted)
43 return json_success()
44
45
46 def unmute_topic(
47 user_profile: UserProfile, stream_id: Optional[int], stream_name: Optional[str], topic_name: str
48 ) -> HttpResponse:
49 error = _("Topic is not muted")
50
51 if stream_name is not None:
52 stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)
53 else:
54 assert stream_id is not None
55 stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)
56
57 if not topic_is_muted(user_profile, stream.id, topic_name):
58 raise JsonableError(error)
59
60 do_unmute_topic(user_profile, stream, topic_name)
61 return json_success()
62
63
64 @has_request_variables
65 def update_muted_topic(
66 request: HttpRequest,
67 user_profile: UserProfile,
68 stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
69 stream: Optional[str] = REQ(default=None),
70 topic: str = REQ(),
71 op: str = REQ(),
72 ) -> HttpResponse:
73
74 check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)
75
76 if op == "add":
77 return mute_topic(
78 user_profile=user_profile,
79 stream_id=stream_id,
80 stream_name=stream,
81 topic_name=topic,
82 date_muted=timezone_now(),
83 )
84 elif op == "remove":
85 return unmute_topic(
86 user_profile=user_profile,
87 stream_id=stream_id,
88 stream_name=stream,
89 topic_name=topic,
90 )
91
92
93 def mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:
94 if user_profile.id == muted_user_id:
95 raise JsonableError(_("Cannot mute self"))
96
97 muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
98 date_muted = timezone_now()
99
100 if get_mute_object(user_profile, muted_user) is not None:
101 raise JsonableError(_("User already muted"))
102
103 do_mute_user(user_profile, muted_user, date_muted)
104 return json_success()
105
106
107 def unmute_user(
108 request: HttpRequest, user_profile: UserProfile, muted_user_id: int
109 ) -> HttpResponse:
110 muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
111 mute_object = get_mute_object(user_profile, muted_user)
112
113 if mute_object is None:
114 raise JsonableError(_("User is not muted"))
115
116 do_unmute_user(mute_object)
117 return json_success()
118
[end of zerver/views/muting.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zerver/views/muting.py b/zerver/views/muting.py
--- a/zerver/views/muting.py
+++ b/zerver/views/muting.py
@@ -94,7 +94,9 @@
if user_profile.id == muted_user_id:
raise JsonableError(_("Cannot mute self"))
- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
+ muted_user = access_user_by_id(
+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
+ )
date_muted = timezone_now()
if get_mute_object(user_profile, muted_user) is not None:
@@ -107,7 +109,9 @@
def unmute_user(
request: HttpRequest, user_profile: UserProfile, muted_user_id: int
) -> HttpResponse:
- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
+ muted_user = access_user_by_id(
+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
+ )
mute_object = get_mute_object(user_profile, muted_user)
if mute_object is None:
|
{"golden_diff": "diff --git a/zerver/views/muting.py b/zerver/views/muting.py\n--- a/zerver/views/muting.py\n+++ b/zerver/views/muting.py\n@@ -94,7 +94,9 @@\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n \n- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n+ muted_user = access_user_by_id(\n+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n+ )\n date_muted = timezone_now()\n \n if get_mute_object(user_profile, muted_user) is not None:\n@@ -107,7 +109,9 @@\n def unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n ) -> HttpResponse:\n- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n+ muted_user = access_user_by_id(\n+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n+ )\n mute_object = get_mute_object(user_profile, muted_user)\n \n if mute_object is None:\n", "issue": "Make muting work for deactivated users\nEven after a user deactivated, it may be helpful to hide that user's messages, avatar, etc. We should therefore make it possible to mute deactivated users.\r\n\r\nThis will also fix a bug where if a user tries to mute a deactivated user, the action silently fails.\n", "before_files": [{"content": "import datetime\nfrom typing import Optional\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.translation import gettext as _\n\nfrom zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.streams import (\n access_stream_by_id,\n access_stream_by_name,\n access_stream_for_unmute_topic_by_id,\n access_stream_for_unmute_topic_by_name,\n check_for_exactly_one_stream_arg,\n)\nfrom zerver.lib.topic_mutes import topic_is_muted\nfrom zerver.lib.user_mutes import get_mute_object\nfrom zerver.lib.users import access_user_by_id\nfrom zerver.lib.validator import check_int\nfrom zerver.models import UserProfile\n\n\ndef mute_topic(\n user_profile: UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n date_muted: datetime.datetime,\n) -> HttpResponse:\n if stream_name is not None:\n (stream, sub) = access_stream_by_name(user_profile, stream_name)\n else:\n assert stream_id is not None\n (stream, sub) = access_stream_by_id(user_profile, stream_id)\n\n if topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n\n do_mute_topic(user_profile, stream, topic_name, date_muted)\n return json_success()\n\n\ndef unmute_topic(\n user_profile: UserProfile, stream_id: Optional[int], stream_name: Optional[str], topic_name: str\n) -> HttpResponse:\n error = _(\"Topic is not muted\")\n\n if stream_name is not None:\n stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)\n else:\n assert stream_id is not None\n stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)\n\n if not topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(error)\n\n do_unmute_topic(user_profile, stream, topic_name)\n return json_success()\n\n\n@has_request_variables\ndef update_muted_topic(\n request: HttpRequest,\n user_profile: UserProfile,\n stream_id: Optional[int] = 
REQ(json_validator=check_int, default=None),\n stream: Optional[str] = REQ(default=None),\n topic: str = REQ(),\n op: str = REQ(),\n) -> HttpResponse:\n\n check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)\n\n if op == \"add\":\n return mute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n date_muted=timezone_now(),\n )\n elif op == \"remove\":\n return unmute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n )\n\n\ndef mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n\n muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n date_muted = timezone_now()\n\n if get_mute_object(user_profile, muted_user) is not None:\n raise JsonableError(_(\"User already muted\"))\n\n do_mute_user(user_profile, muted_user, date_muted)\n return json_success()\n\n\ndef unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n) -> HttpResponse:\n muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n mute_object = get_mute_object(user_profile, muted_user)\n\n if mute_object is None:\n raise JsonableError(_(\"User is not muted\"))\n\n do_unmute_user(mute_object)\n return json_success()\n", "path": "zerver/views/muting.py"}]}
| 1,732 | 277 |
gh_patches_debug_28380 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5679 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add clearer action reminder in Journalist Interface to enable v3
As part of the 1.7.0 release, we've agreed that we want to improve our reminder in the Journalist Interface to enable v3 onion services. The current banner looks as follows:

You can also see it on https://demo-journalist.securedrop.org/ after logging in using the dev credentials.
The plan of record is as follows:
- SecureDrop 1.7.0 (released in January) will still fully support v2 onion services
- SecureDrop 1.8.0 (released in February) will support Ubuntu 20.04 (#4768), and _exclusively_ support v3 onion services for Focal installs, without changing the behavior on Xenial.
- After April 30 (Ubuntu 16.04 EOL), instances not upgraded to Ubuntu 20.04 will self-disable.
In other words, the v3 switch is unavoidable due to the Xenial end-of-life. Our goal with the 1.7.0 release is to get more admins to make the switch prior to a reinstall on Focal, to make the process a bit easier for them. The specific action we need to motivate:
1) *If v3 is already enabled, but v2 services are still available:* Disable v2 services, ensure that all journalists/admins have v3 creds, and ensure that landing page points to v3 onion.
2) *If v3 is not enabled yet:* Enable v3 services (can still run them alongside v2 for a bit, per docs, then go v3-only).
We've discussed that this could potentially be done via different banners for 1) and 2) in the Journalist Interface. Let's kick around language/UX a bit in the comments, and I'll add the final agreed upon spec to the top-level issue.
## User Story
As an administrator, I want to be reminded of critical actions I must take to keep my instance running, so that I'm not caught by surprise.
</issue>
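The banner decision the patch below ends up encoding reduces to a small truth table over whether the v2 and v3 onion URL marker files exist. The following is an illustrative sketch, not SecureDrop's code; in the repository the flags are computed in `create_app` and exposed as `show_v2_onion_eol_warning` / `show_v2_onion_migration_warning`.

```python
from typing import Optional


def onion_warning(v2_url_exists: bool, v3_url_exists: bool) -> Optional[str]:
    # Installs with neither marker file predate the marker scheme and are
    # treated as v2-only, matching the fallback in the accepted patch.
    v2_enabled = v2_url_exists or (not v2_url_exists and not v3_url_exists)
    v3_enabled = v3_url_exists
    if v2_enabled and not v3_enabled:
        return "eol"        # case 2: v3 not enabled yet, urge enabling it
    if v2_enabled and v3_enabled:
        return "migration"  # case 1: both running, urge disabling v2
    return None             # v3-only installs need no banner


assert onion_warning(False, False) == "eol"
assert onion_warning(True, False) == "eol"
assert onion_warning(True, True) == "migration"
assert onion_warning(False, True) is None
```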
<code>
[start of securedrop/journalist_app/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 from datetime import datetime, timedelta
4 from flask import (Flask, session, redirect, url_for, flash, g, request,
5 render_template)
6 from flask_assets import Environment
7 from flask_babel import gettext
8 from flask_wtf.csrf import CSRFProtect, CSRFError
9 from os import path
10 import sys
11 from werkzeug.exceptions import default_exceptions
12
13 import i18n
14 import template_filters
15 import version
16
17 from crypto_util import CryptoUtil
18 from db import db
19 from journalist_app import account, admin, api, main, col
20 from journalist_app.utils import (get_source, logged_in,
21 JournalistInterfaceSessionInterface,
22 cleanup_expired_revoked_tokens)
23 from models import InstanceConfig, Journalist
24 from store import Storage
25
26 import typing
27 # https://www.python.org/dev/peps/pep-0484/#runtime-or-type-checking
28 if typing.TYPE_CHECKING:
29 # flake8 can not understand type annotation yet.
30 # That is why all type annotation relative import
31 # statements has to be marked as noqa.
32 # http://flake8.pycqa.org/en/latest/user/error-codes.html?highlight=f401
33 from sdconfig import SDConfig # noqa: F401
34 from typing import Optional, Union, Tuple, Any # noqa: F401
35 from werkzeug import Response # noqa: F401
36 from werkzeug.exceptions import HTTPException # noqa: F401
37
38 _insecure_views = ['main.login', 'main.select_logo', 'static']
39
40
41 def create_app(config: 'SDConfig') -> Flask:
42 app = Flask(__name__,
43 template_folder=config.JOURNALIST_TEMPLATES_DIR,
44 static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
45
46 app.config.from_object(config.JOURNALIST_APP_FLASK_CONFIG_CLS)
47 app.session_interface = JournalistInterfaceSessionInterface()
48
49 csrf = CSRFProtect(app)
50 Environment(app)
51
52 app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
53 app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI
54 db.init_app(app)
55
56 v2_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v2_url'))
57 v3_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v3_url'))
58 app.config.update(V2_ONION_ENABLED=v2_enabled, V3_ONION_ENABLED=v3_enabled)
59
60 # TODO: Attaching a Storage dynamically like this disables all type checking (and
61 # breaks code analysis tools) for code that uses current_app.storage; it should be refactored
62 app.storage = Storage(config.STORE_DIR,
63 config.TEMP_DIR,
64 config.JOURNALIST_KEY)
65
66 # TODO: Attaching a CryptoUtil dynamically like this disables all type checking (and
67 # breaks code analysis tools) for code that uses current_app.storage; it should be refactored
68 app.crypto_util = CryptoUtil(
69 scrypt_params=config.SCRYPT_PARAMS,
70 scrypt_id_pepper=config.SCRYPT_ID_PEPPER,
71 scrypt_gpg_pepper=config.SCRYPT_GPG_PEPPER,
72 securedrop_root=config.SECUREDROP_ROOT,
73 nouns_file=config.NOUNS,
74 adjectives_file=config.ADJECTIVES,
75 gpg_key_dir=config.GPG_KEY_DIR,
76 )
77
78 @app.errorhandler(CSRFError)
79 def handle_csrf_error(e: CSRFError) -> 'Response':
80 # render the message first to ensure it's localized.
81 msg = gettext('You have been logged out due to inactivity.')
82 session.clear()
83 flash(msg, 'error')
84 return redirect(url_for('main.login'))
85
86 def _handle_http_exception(
87 error: 'HTTPException'
88 ) -> 'Tuple[Union[Response, str], Optional[int]]':
89 # Workaround for no blueprint-level 404/5 error handlers, see:
90 # https://github.com/pallets/flask/issues/503#issuecomment-71383286
91 handler = list(app.error_handler_spec['api'][error.code].values())[0]
92 if request.path.startswith('/api/') and handler:
93 return handler(error)
94
95 return render_template('error.html', error=error), error.code
96
97 for code in default_exceptions:
98 app.errorhandler(code)(_handle_http_exception)
99
100 i18n.setup_app(config, app)
101
102 app.jinja_env.trim_blocks = True
103 app.jinja_env.lstrip_blocks = True
104 app.jinja_env.globals['version'] = version.__version__
105 app.jinja_env.filters['rel_datetime_format'] = \
106 template_filters.rel_datetime_format
107 app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat
108
109 @app.before_first_request
110 def expire_blacklisted_tokens() -> None:
111 cleanup_expired_revoked_tokens()
112
113 @app.before_request
114 def load_instance_config() -> None:
115 app.instance_config = InstanceConfig.get_current()
116
117 @app.before_request
118 def setup_g() -> 'Optional[Response]':
119 """Store commonly used values in Flask's special g object"""
120 if 'expires' in session and datetime.utcnow() >= session['expires']:
121 session.clear()
122 flash(gettext('You have been logged out due to inactivity.'),
123 'error')
124
125 uid = session.get('uid', None)
126 if uid:
127 user = Journalist.query.get(uid)
128 if user and 'nonce' in session and \
129 session['nonce'] != user.session_nonce:
130 session.clear()
131 flash(gettext('You have been logged out due to password change'),
132 'error')
133
134 session['expires'] = datetime.utcnow() + \
135 timedelta(minutes=getattr(config,
136 'SESSION_EXPIRATION_MINUTES',
137 120))
138
139 # Work around https://github.com/lepture/flask-wtf/issues/275
140 # -- after upgrading from Python 2 to Python 3, any existing
141 # session's csrf_token value will be retrieved as bytes,
142 # causing a TypeError. This simple fix, deleting the existing
143 # token, was suggested in the issue comments. This code will
144 # be safe to remove after Python 2 reaches EOL in 2020, and no
145 # supported SecureDrop installations can still have this
146 # problem.
147 if sys.version_info.major > 2 and type(session.get('csrf_token')) is bytes:
148 del session['csrf_token']
149
150 uid = session.get('uid', None)
151 if uid:
152 g.user = Journalist.query.get(uid)
153
154 g.locale = i18n.get_locale(config)
155 g.text_direction = i18n.get_text_direction(g.locale)
156 g.html_lang = i18n.locale_to_rfc_5646(g.locale)
157 g.locales = i18n.get_locale2name()
158
159 if app.instance_config.organization_name:
160 g.organization_name = app.instance_config.organization_name
161 else:
162 g.organization_name = gettext('SecureDrop')
163
164 if not app.config['V3_ONION_ENABLED'] or app.config['V2_ONION_ENABLED']:
165 g.show_v2_onion_eol_warning = True
166
167 if request.path.split('/')[1] == 'api':
168 pass # We use the @token_required decorator for the API endpoints
169 else: # We are not using the API
170 if request.endpoint not in _insecure_views and not logged_in():
171 return redirect(url_for('main.login'))
172
173 if request.method == 'POST':
174 filesystem_id = request.form.get('filesystem_id')
175 if filesystem_id:
176 g.filesystem_id = filesystem_id
177 g.source = get_source(filesystem_id)
178
179 return None
180
181 app.register_blueprint(main.make_blueprint(config))
182 app.register_blueprint(account.make_blueprint(config),
183 url_prefix='/account')
184 app.register_blueprint(admin.make_blueprint(config), url_prefix='/admin')
185 app.register_blueprint(col.make_blueprint(config), url_prefix='/col')
186 api_blueprint = api.make_blueprint(config)
187 app.register_blueprint(api_blueprint, url_prefix='/api/v1')
188 csrf.exempt(api_blueprint)
189
190 return app
191
[end of securedrop/journalist_app/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/securedrop/journalist_app/__init__.py b/securedrop/journalist_app/__init__.py
--- a/securedrop/journalist_app/__init__.py
+++ b/securedrop/journalist_app/__init__.py
@@ -53,8 +53,13 @@
app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI
db.init_app(app)
- v2_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v2_url'))
- v3_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v3_url'))
+ def _url_exists(u: str) -> bool:
+ return path.exists(path.join(config.SECUREDROP_DATA_ROOT, u))
+
+ v2_enabled = _url_exists('source_v2_url') or ((not _url_exists('source_v2_url'))
+ and (not _url_exists('source_v3_url')))
+ v3_enabled = _url_exists('source_v3_url')
+
app.config.update(V2_ONION_ENABLED=v2_enabled, V3_ONION_ENABLED=v3_enabled)
# TODO: Attaching a Storage dynamically like this disables all type checking (and
@@ -161,9 +166,12 @@
else:
g.organization_name = gettext('SecureDrop')
- if not app.config['V3_ONION_ENABLED'] or app.config['V2_ONION_ENABLED']:
+ if app.config['V2_ONION_ENABLED'] and not app.config['V3_ONION_ENABLED']:
g.show_v2_onion_eol_warning = True
+ if app.config['V2_ONION_ENABLED'] and app.config['V3_ONION_ENABLED']:
+ g.show_v2_onion_migration_warning = True
+
if request.path.split('/')[1] == 'api':
pass # We use the @token_required decorator for the API endpoints
else: # We are not using the API
|
{"golden_diff": "diff --git a/securedrop/journalist_app/__init__.py b/securedrop/journalist_app/__init__.py\n--- a/securedrop/journalist_app/__init__.py\n+++ b/securedrop/journalist_app/__init__.py\n@@ -53,8 +53,13 @@\n app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI\n db.init_app(app)\n \n- v2_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v2_url'))\n- v3_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v3_url'))\n+ def _url_exists(u: str) -> bool:\n+ return path.exists(path.join(config.SECUREDROP_DATA_ROOT, u))\n+\n+ v2_enabled = _url_exists('source_v2_url') or ((not _url_exists('source_v2_url'))\n+ and (not _url_exists('source_v3_url')))\n+ v3_enabled = _url_exists('source_v3_url')\n+\n app.config.update(V2_ONION_ENABLED=v2_enabled, V3_ONION_ENABLED=v3_enabled)\n \n # TODO: Attaching a Storage dynamically like this disables all type checking (and\n@@ -161,9 +166,12 @@\n else:\n g.organization_name = gettext('SecureDrop')\n \n- if not app.config['V3_ONION_ENABLED'] or app.config['V2_ONION_ENABLED']:\n+ if app.config['V2_ONION_ENABLED'] and not app.config['V3_ONION_ENABLED']:\n g.show_v2_onion_eol_warning = True\n \n+ if app.config['V2_ONION_ENABLED'] and app.config['V3_ONION_ENABLED']:\n+ g.show_v2_onion_migration_warning = True\n+\n if request.path.split('/')[1] == 'api':\n pass # We use the @token_required decorator for the API endpoints\n else: # We are not using the API\n", "issue": "Add clearer action reminder in Journalist Interface to enable v3\nAs part of the 1.7.0 release, we've agreed that we want to improve our reminder in the Journalist Interface to enable v3 onion services. The current banner looks as follows:\r\n\r\n\r\n\r\nYou can also see it on https://demo-journalist.securedrop.org/ after logging in using the dev credentials.\r\n\r\nThe plan of record is as follows:\r\n- SecureDrop 1.7.0 (released in January) will still fully support v2 onion services\r\n- SecureDrop 1.8.0 (released in February) will support Ubuntu 20.04 (#4768), and _exclusively_ support v3 onion services for Focal installs, without changing the behavior on Xenial.\r\n- After April 30 (Ubuntu 16.04 EOL), instances not upgraded to Ubuntu 20.04 will self-disable.\r\n\r\nIn other words, the v3 switch is unavoidable due to the Xenial end-of-life. Our goal with the 1.7.0 release is to get more admins to make the switch prior to a reinstall on Focal, to make the process a bit easier for them. The specific action we need to motivate:\r\n\r\n1) *If v3 is already enabled, but v2 services are still available:* Disable v2 services, ensure that all journalists/admins have v3 creds, and ensure that landing page points to v3 onion.\r\n\r\n2) *If v3 is not enabled yet:* Enable v3 services (can still run them alongside v2 for a bit, per docs, then go v3-only).\r\n\r\nWe've discussed that this could potentially be done via different banners for 1) and 2) in the Journalist Interface. 
Let's kick around language/UX a bit in the comments, and I'll add the final agreed upon spec to the top-level issue.\r\n\r\n## User Story\r\n\r\nAs an administrator, I want to be reminded of critical actions I must take to keep my instance running, so that I'm not caught by surprise.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime, timedelta\nfrom flask import (Flask, session, redirect, url_for, flash, g, request,\n render_template)\nfrom flask_assets import Environment\nfrom flask_babel import gettext\nfrom flask_wtf.csrf import CSRFProtect, CSRFError\nfrom os import path\nimport sys\nfrom werkzeug.exceptions import default_exceptions\n\nimport i18n\nimport template_filters\nimport version\n\nfrom crypto_util import CryptoUtil\nfrom db import db\nfrom journalist_app import account, admin, api, main, col\nfrom journalist_app.utils import (get_source, logged_in,\n JournalistInterfaceSessionInterface,\n cleanup_expired_revoked_tokens)\nfrom models import InstanceConfig, Journalist\nfrom store import Storage\n\nimport typing\n# https://www.python.org/dev/peps/pep-0484/#runtime-or-type-checking\nif typing.TYPE_CHECKING:\n # flake8 can not understand type annotation yet.\n # That is why all type annotation relative import\n # statements has to be marked as noqa.\n # http://flake8.pycqa.org/en/latest/user/error-codes.html?highlight=f401\n from sdconfig import SDConfig # noqa: F401\n from typing import Optional, Union, Tuple, Any # noqa: F401\n from werkzeug import Response # noqa: F401\n from werkzeug.exceptions import HTTPException # noqa: F401\n\n_insecure_views = ['main.login', 'main.select_logo', 'static']\n\n\ndef create_app(config: 'SDConfig') -> Flask:\n app = Flask(__name__,\n template_folder=config.JOURNALIST_TEMPLATES_DIR,\n static_folder=path.join(config.SECUREDROP_ROOT, 'static'))\n\n app.config.from_object(config.JOURNALIST_APP_FLASK_CONFIG_CLS)\n app.session_interface = JournalistInterfaceSessionInterface()\n\n csrf = CSRFProtect(app)\n Environment(app)\n\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI\n db.init_app(app)\n\n v2_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v2_url'))\n v3_enabled = path.exists(path.join(config.SECUREDROP_DATA_ROOT, 'source_v3_url'))\n app.config.update(V2_ONION_ENABLED=v2_enabled, V3_ONION_ENABLED=v3_enabled)\n\n # TODO: Attaching a Storage dynamically like this disables all type checking (and\n # breaks code analysis tools) for code that uses current_app.storage; it should be refactored\n app.storage = Storage(config.STORE_DIR,\n config.TEMP_DIR,\n config.JOURNALIST_KEY)\n\n # TODO: Attaching a CryptoUtil dynamically like this disables all type checking (and\n # breaks code analysis tools) for code that uses current_app.storage; it should be refactored\n app.crypto_util = CryptoUtil(\n scrypt_params=config.SCRYPT_PARAMS,\n scrypt_id_pepper=config.SCRYPT_ID_PEPPER,\n scrypt_gpg_pepper=config.SCRYPT_GPG_PEPPER,\n securedrop_root=config.SECUREDROP_ROOT,\n nouns_file=config.NOUNS,\n adjectives_file=config.ADJECTIVES,\n gpg_key_dir=config.GPG_KEY_DIR,\n )\n\n @app.errorhandler(CSRFError)\n def handle_csrf_error(e: CSRFError) -> 'Response':\n # render the message first to ensure it's localized.\n msg = gettext('You have been logged out due to inactivity.')\n session.clear()\n flash(msg, 'error')\n return redirect(url_for('main.login'))\n\n def _handle_http_exception(\n error: 'HTTPException'\n ) -> 'Tuple[Union[Response, 
str], Optional[int]]':\n # Workaround for no blueprint-level 404/5 error handlers, see:\n # https://github.com/pallets/flask/issues/503#issuecomment-71383286\n handler = list(app.error_handler_spec['api'][error.code].values())[0]\n if request.path.startswith('/api/') and handler:\n return handler(error)\n\n return render_template('error.html', error=error), error.code\n\n for code in default_exceptions:\n app.errorhandler(code)(_handle_http_exception)\n\n i18n.setup_app(config, app)\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n app.jinja_env.globals['version'] = version.__version__\n app.jinja_env.filters['rel_datetime_format'] = \\\n template_filters.rel_datetime_format\n app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat\n\n @app.before_first_request\n def expire_blacklisted_tokens() -> None:\n cleanup_expired_revoked_tokens()\n\n @app.before_request\n def load_instance_config() -> None:\n app.instance_config = InstanceConfig.get_current()\n\n @app.before_request\n def setup_g() -> 'Optional[Response]':\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n if 'expires' in session and datetime.utcnow() >= session['expires']:\n session.clear()\n flash(gettext('You have been logged out due to inactivity.'),\n 'error')\n\n uid = session.get('uid', None)\n if uid:\n user = Journalist.query.get(uid)\n if user and 'nonce' in session and \\\n session['nonce'] != user.session_nonce:\n session.clear()\n flash(gettext('You have been logged out due to password change'),\n 'error')\n\n session['expires'] = datetime.utcnow() + \\\n timedelta(minutes=getattr(config,\n 'SESSION_EXPIRATION_MINUTES',\n 120))\n\n # Work around https://github.com/lepture/flask-wtf/issues/275\n # -- after upgrading from Python 2 to Python 3, any existing\n # session's csrf_token value will be retrieved as bytes,\n # causing a TypeError. This simple fix, deleting the existing\n # token, was suggested in the issue comments. 
This code will\n # be safe to remove after Python 2 reaches EOL in 2020, and no\n # supported SecureDrop installations can still have this\n # problem.\n if sys.version_info.major > 2 and type(session.get('csrf_token')) is bytes:\n del session['csrf_token']\n\n uid = session.get('uid', None)\n if uid:\n g.user = Journalist.query.get(uid)\n\n g.locale = i18n.get_locale(config)\n g.text_direction = i18n.get_text_direction(g.locale)\n g.html_lang = i18n.locale_to_rfc_5646(g.locale)\n g.locales = i18n.get_locale2name()\n\n if app.instance_config.organization_name:\n g.organization_name = app.instance_config.organization_name\n else:\n g.organization_name = gettext('SecureDrop')\n\n if not app.config['V3_ONION_ENABLED'] or app.config['V2_ONION_ENABLED']:\n g.show_v2_onion_eol_warning = True\n\n if request.path.split('/')[1] == 'api':\n pass # We use the @token_required decorator for the API endpoints\n else: # We are not using the API\n if request.endpoint not in _insecure_views and not logged_in():\n return redirect(url_for('main.login'))\n\n if request.method == 'POST':\n filesystem_id = request.form.get('filesystem_id')\n if filesystem_id:\n g.filesystem_id = filesystem_id\n g.source = get_source(filesystem_id)\n\n return None\n\n app.register_blueprint(main.make_blueprint(config))\n app.register_blueprint(account.make_blueprint(config),\n url_prefix='/account')\n app.register_blueprint(admin.make_blueprint(config), url_prefix='/admin')\n app.register_blueprint(col.make_blueprint(config), url_prefix='/col')\n api_blueprint = api.make_blueprint(config)\n app.register_blueprint(api_blueprint, url_prefix='/api/v1')\n csrf.exempt(api_blueprint)\n\n return app\n", "path": "securedrop/journalist_app/__init__.py"}]}
| 3,286 | 432 |
gh_patches_debug_15617 | rasdani/github-patches | git_diff | python__peps-2826 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Decide what PEPs fall under `Topic: Packaging` and act accordingly
Right now, as originally discussed in #2096, there seem to be two plausible interpretations of what qualifies for the `Topic: Packaging` label: PEPs that _relate_ to packaging in some direct way, and PEPs that apply to the packaging ecosystem (as opposed to the stdlib), and are thus discussed primarily on packaging community fora and accepted/rejected by the relevant packaging-community standing PEP delegate.
In particular, given PEP 632 (PEP-632), on deprecating and removing `distutils`, was removed from packaging at the request of @zooba for relating to the core stdlib, this would seem to suggest that PEP 453 (PEP-453) and PEP 477 (PEP-477), on `ensurepip` and its backport to Python 2.7, respectively, should also be removed by the same standard (or, alternatively, PEP 632 restored).
Presuming consensus can be reached prior to its merging, I can implement this on the existing #2690 , given it makes a number of related improvements/refinements to the packaging-tagged PEP headers.
<details>
<summary>Relevant discussion from #2096 (click to expand)</summary>
@pfmoore
> this is a core Python PEP, not a packaging PEP, and like @dstufft noted, I don't think we tend to modify those after approval.
@CAM-Gerlach
> In that case, as originally discussed on https://github.com/python/peps/pull/2656 and done for PEP 632 ([PEP-632](https://peps.python.org/632)), maybe we should drop Topic: Packaging from this PEP as well as the related PEP 477 ([PEP-477](https://peps.python.org/477))? I can do it in https://github.com/python/peps/pull/2690 since it's closely related to the other changes in that PR (also, your input there on updating PEP 262 ([PEP-262](https://peps.python.org/262))'s status to reflect reality would be helpful).
@pfmoore
> I don't know on that. It's packaging related in the sense that it is about packaging tools, so if people are looking for things that are "about" packaging, it does belong in the Packaging topic. But it's not a "PyPA specification" in the sense defined [here](https://www.pypa.io/en/latest/specifications/), which is why I said that the normal PEP rules about modifications should apply, rather than the PyPA-specific process. Ultimately, I'm not actually sure what the rules are for when a PEP qualifies as being in the Packaging topic...
@CAM-Gerlach
> Yeah, exactly. That basically comes down to whether a Topic is considered more of a "Category" (i.e. the former), or a "Track" (i.e. the latter). I presented both alternatives at the beginning of the discussion, and both were favored at different points in its evolution; the initial consensus in the Discourse thread was something more like a "Track", but on the PR and naming of the "Topic" header that was eventually implemented (that allows multiple topics per PR), it ended up basically the latter, at least nominally.
>
> However, with PEP 632 ([PEP-632](https://peps.python.org/632)) being removed at the behest of @zooba in https://github.com/python/peps/pull/2636#discussion_r898173875 , given these two PEPs basically just concern adding/backporting a utility module from the core standard library rather than PyPA/PyPI standards, it seems to me to not make sense to keep those PEPs but not another very similar one about the deprecation and removal of what was the core packaging module. So, I suggest either removing those two, or adding back PEP 632.
@pradyunsg
> Alternatively, we can defer doing anything here until the authors have a concern with being listed under the packaging index as well or till we see/identify more instances of something like this? In any case, this discussion should move to a separate issue IMO.
@zooba
> I think this is in the same boat as PEP 632, and shouldn't be tagged as a packaging PEP.
>
> As a proposal of the rule I'd use, if we'd let the packaging delegate (i.e. Paul, right now) decide without going to python-dev or the SC, then it's a packaging PEP. Whether something is added to the standard library clearly falls outside of this scope, and so this PEP is standards track and not packaging.
</details>
</issue>
<code>
[start of pep_sphinx_extensions/pep_zero_generator/constants.py]
1 """Holds type and status constants for PEP 0 generation."""
2
3 STATUS_ACCEPTED = "Accepted"
4 STATUS_ACTIVE = "Active"
5 STATUS_DEFERRED = "Deferred"
6 STATUS_DRAFT = "Draft"
7 STATUS_FINAL = "Final"
8 STATUS_PROVISIONAL = "Provisional"
9 STATUS_REJECTED = "Rejected"
10 STATUS_SUPERSEDED = "Superseded"
11 STATUS_WITHDRAWN = "Withdrawn"
12
13 # Valid values for the Status header.
14 STATUS_VALUES = {
15 STATUS_ACCEPTED, STATUS_PROVISIONAL, STATUS_REJECTED, STATUS_WITHDRAWN,
16 STATUS_DEFERRED, STATUS_FINAL, STATUS_ACTIVE, STATUS_DRAFT, STATUS_SUPERSEDED,
17 }
18 # Map of invalid/special statuses to their valid counterparts
19 SPECIAL_STATUSES = {
20 "April Fool!": STATUS_REJECTED, # See PEP 401 :)
21 }
22 # Draft PEPs have no status displayed, Active shares a key with Accepted
23 HIDE_STATUS = {STATUS_DRAFT, STATUS_ACTIVE}
24 # Dead PEP statuses
25 DEAD_STATUSES = {STATUS_REJECTED, STATUS_WITHDRAWN, STATUS_SUPERSEDED}
26
27 TYPE_INFO = "Informational"
28 TYPE_PROCESS = "Process"
29 TYPE_STANDARDS = "Standards Track"
30
31 # Valid values for the Type header.
32 TYPE_VALUES = {TYPE_STANDARDS, TYPE_INFO, TYPE_PROCESS}
33 # Active PEPs can only be for Informational or Process PEPs.
34 ACTIVE_ALLOWED = {TYPE_PROCESS, TYPE_INFO}
35
36 # map of topic -> additional description
37 SUBINDICES_BY_TOPIC = {
38 "packaging": """\
39 The canonical, up-to-date packaging specifications can be found on the
40 `Python Packaging Authority`_ (PyPA) `specifications`_ page.
41 Packaging PEPs follow the `PyPA specification update process`_.
42 They are used to propose major additions or changes to the PyPA specifications.
43
44 .. _Python Packaging Authority: https://www.pypa.io/
45 .. _specifications: https://packaging.python.org/en/latest/specifications/
46 .. _PyPA specification update process: https://www.pypa.io/en/latest/specifications/#specification-update-process
47 """,
48 "release": """\
49 A PEP is written to specify the release cycle for each feature release of Python.
50 See the `developer's guide`_ for more information.
51
52 .. _developer's guide: https://devguide.python.org/devcycle/
53 """,
54 "typing": """\
55 Many recent PEPs propose changes to Python's static type system
56 or otherwise relate to type annotations.
57 They are listed here for reference.
58 """
59 }
60
[end of pep_sphinx_extensions/pep_zero_generator/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pep_sphinx_extensions/pep_zero_generator/constants.py b/pep_sphinx_extensions/pep_zero_generator/constants.py
--- a/pep_sphinx_extensions/pep_zero_generator/constants.py
+++ b/pep_sphinx_extensions/pep_zero_generator/constants.py
@@ -36,10 +36,10 @@
# map of topic -> additional description
SUBINDICES_BY_TOPIC = {
"packaging": """\
-The canonical, up-to-date packaging specifications can be found on the
-`Python Packaging Authority`_ (PyPA) `specifications`_ page.
Packaging PEPs follow the `PyPA specification update process`_.
They are used to propose major additions or changes to the PyPA specifications.
+The canonical, up-to-date packaging specifications can be found on the
+`Python Packaging Authority`_ (PyPA) `specifications`_ page.
.. _Python Packaging Authority: https://www.pypa.io/
.. _specifications: https://packaging.python.org/en/latest/specifications/
|
{"golden_diff": "diff --git a/pep_sphinx_extensions/pep_zero_generator/constants.py b/pep_sphinx_extensions/pep_zero_generator/constants.py\n--- a/pep_sphinx_extensions/pep_zero_generator/constants.py\n+++ b/pep_sphinx_extensions/pep_zero_generator/constants.py\n@@ -36,10 +36,10 @@\n # map of topic -> additional description\n SUBINDICES_BY_TOPIC = {\n \"packaging\": \"\"\"\\\n-The canonical, up-to-date packaging specifications can be found on the\n-`Python Packaging Authority`_ (PyPA) `specifications`_ page.\n Packaging PEPs follow the `PyPA specification update process`_.\n They are used to propose major additions or changes to the PyPA specifications.\n+The canonical, up-to-date packaging specifications can be found on the\n+`Python Packaging Authority`_ (PyPA) `specifications`_ page.\n \n .. _Python Packaging Authority: https://www.pypa.io/\n .. _specifications: https://packaging.python.org/en/latest/specifications/\n", "issue": "Decide what PEPs fall under `Topic: Packaging` and act accordingly\nRight now, as originally discussed in #2096 , there seem to be two plausible interpretation of what qualifies for the `Topic: Packaging` label: PEPs that _relate_ to packaging in some direct way, and PEPs that apply to packaging ecosystem (as opposed to the stdlib), and are thus discussed primarily on packaging community fora and accepted/rejected by the relevant packaging-community standing PEP delegate.\r\n\r\nIn particular, given PEP 632 (PEP-632), on deprecating and removing `disutils`, was removed from packaging at the request of @zooba for relating to the core stdlib, this would seem to suggest that PEP 453 (PEP-453) and PEP 477 (PEP-477), on `ensurepip` and its backport to Python 2.7, respectively, should also be removed by the same standard (or, alternatively, PEP 632 restored).\r\n\r\nPresuming consensus can be reached prior to its merging, I can implement this on the existing #2690 , given it makes a number of related improvements/refinements to the packaging-tagged PEP headers.\r\n\r\n<details>\r\n\r\n<summary>Relevant discussion from #2096 (click to expand)</summary>\r\n\r\n@pfmoore \r\n\r\n> this is a core Python PEP, not a packaging PEP, and like @dstufft noted, I don't think we tend to modify those after approval.\r\n\r\n@CAM-Gerlach \r\n\r\n> In that case, as originally discussed on https://github.com/python/peps/pull/2656 and done for PEP 632 ([PEP-632](https://peps.python.org/632)), maybe we should drop Topic: Packaging from this PEP as well as the related PEP 477 ([PEP-477](https://peps.python.org/477)) ? I can do it in https://github.com/python/peps/pull/2690 since its closely related to the other changes in that PR (also, your input there on updating PEP 262 ([PEP-262](https://peps.python.org/262))'s status to reflect reality would be helpful).\r\n\r\n@pfmoore \r\n\r\n> I don't know on that. It's packaging related in the sense that it is about packaging tools, so if people are looking for things that are \"about\" packaging, it does belong in the Packaging topic. But it's not a \"PyPA specification\" in the sense defined [here](https://www.pypa.io/en/latest/specifications/), which is why I said that the normal PEP rules about modifications should apply, rather than the PyPA-specific process. Ultimately, I'm not actually sure what the rules are for when a PEP qualifies as being in the Packaging topic...\r\n\r\n@CAM-Gerlach \r\n\r\n> Yeah, exactly. That basically comes down to whether a Topic is considered more of a \"Category\" (i.e. the former), or a \"Track\" (i.e. 
the latter). I presented both alternatives at the beginning of the discussion, and both were favored at different points in its evolution; the initial consensus in the Discourse thread was something more like a \"Track\", but on the PR and naming of the \"Topic\" header that was eventually implemented (that allows multiple topics per PR), it ended up basically the latter, at least nominally.\r\n>\r\n> However, with PEP 632 ([PEP-632](https://peps.python.org/632)) being removed at the behest of @zooba in https://github.com/python/peps/pull/2636#discussion_r898173875 , given these two PEPs basically just concern adding/backporting a utility module from the core standard library rather than PyPA/PyPI standards, it seems to me to not make sense to keep those PEPs but not another very similar one about the deprecation and removal of what was the core packaging module. So, I suggest either removing those two, or adding back PEP 632.\r\n\r\n@pradyunsg \r\n\r\n> Alternatively, we can defer doing anything here until the authors have a concern with being listed under the packaging index as well or till we see/identify more instances of something like this? In any case, this discussion should move to a separate issue IMO.\r\n\r\n@zooba \r\n\r\n> I think this is in the same boat as PEP 632, and shouldn't be tagged as a packaging PEP.\r\n>\r\n> As a proposal of the rule I'd use, if we'd let the packaging delegate (i.e. Paul, right now) decide without going to python-dev or the SC, then it's a packaging PEP. Whether something is added to the standard library clearly falls outside of this scope, and so this PEP is standards track and not packaging.\r\n\r\n</details>\n", "before_files": [{"content": "\"\"\"Holds type and status constants for PEP 0 generation.\"\"\"\n\nSTATUS_ACCEPTED = \"Accepted\"\nSTATUS_ACTIVE = \"Active\"\nSTATUS_DEFERRED = \"Deferred\"\nSTATUS_DRAFT = \"Draft\"\nSTATUS_FINAL = \"Final\"\nSTATUS_PROVISIONAL = \"Provisional\"\nSTATUS_REJECTED = \"Rejected\"\nSTATUS_SUPERSEDED = \"Superseded\"\nSTATUS_WITHDRAWN = \"Withdrawn\"\n\n# Valid values for the Status header.\nSTATUS_VALUES = {\n STATUS_ACCEPTED, STATUS_PROVISIONAL, STATUS_REJECTED, STATUS_WITHDRAWN,\n STATUS_DEFERRED, STATUS_FINAL, STATUS_ACTIVE, STATUS_DRAFT, STATUS_SUPERSEDED,\n}\n# Map of invalid/special statuses to their valid counterparts\nSPECIAL_STATUSES = {\n \"April Fool!\": STATUS_REJECTED, # See PEP 401 :)\n}\n# Draft PEPs have no status displayed, Active shares a key with Accepted\nHIDE_STATUS = {STATUS_DRAFT, STATUS_ACTIVE}\n# Dead PEP statuses\nDEAD_STATUSES = {STATUS_REJECTED, STATUS_WITHDRAWN, STATUS_SUPERSEDED}\n\nTYPE_INFO = \"Informational\"\nTYPE_PROCESS = \"Process\"\nTYPE_STANDARDS = \"Standards Track\"\n\n# Valid values for the Type header.\nTYPE_VALUES = {TYPE_STANDARDS, TYPE_INFO, TYPE_PROCESS}\n# Active PEPs can only be for Informational or Process PEPs.\nACTIVE_ALLOWED = {TYPE_PROCESS, TYPE_INFO}\n\n# map of topic -> additional description\nSUBINDICES_BY_TOPIC = {\n \"packaging\": \"\"\"\\\nThe canonical, up-to-date packaging specifications can be found on the\n`Python Packaging Authority`_ (PyPA) `specifications`_ page.\nPackaging PEPs follow the `PyPA specification update process`_.\nThey are used to propose major additions or changes to the PyPA specifications.\n\n.. _Python Packaging Authority: https://www.pypa.io/\n.. _specifications: https://packaging.python.org/en/latest/specifications/\n.. 
_PyPA specification update process: https://www.pypa.io/en/latest/specifications/#specification-update-process\n\"\"\",\n \"release\": \"\"\"\\\nA PEP is written to specify the release cycle for each feature release of Python.\nSee the `developer's guide`_ for more information.\n\n.. _developer's guide: https://devguide.python.org/devcycle/\n\"\"\",\n \"typing\": \"\"\"\\\nMany recent PEPs propose changes to Python's static type system\nor otherwise relate to type annotations.\nThey are listed here for reference.\n\"\"\"\n}\n", "path": "pep_sphinx_extensions/pep_zero_generator/constants.py"}]}
| 2,269 | 226 |
gh_patches_debug_38505 | rasdani/github-patches | git_diff | interlegis__sapl-1821 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Commission member - removal date (data de desligamento)
The removal date must be later than the designation date (data da designação).
</issue>
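A hedged sketch of the validation the issue asks for, assuming the usual Django clean() pattern and the `data_designacao` / `data_desligamento` field names visible in `ParticipacaoEditForm` below. Whether the two dates may coincide is left as a policy detail, and this is not the project's actual patch.

```python
from datetime import date

from django.core.exceptions import ValidationError


def check_desligamento(data_designacao, data_desligamento):
    """Reject a removal date that falls before the designation date."""
    if data_designacao and data_desligamento and data_desligamento < data_designacao:
        raise ValidationError(
            'Data de desligamento deve ser posterior à data da designação.')


# Intended call site, e.g. inside a Participacao form's clean():
#     check_desligamento(cleaned_data.get('data_designacao'),
#                        cleaned_data.get('data_desligamento'))
check_desligamento(date(2018, 1, 2), date(2018, 6, 30))  # valid, no exception
```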
<code>
[start of sapl/comissoes/forms.py]
1 from django import forms
2 from django.contrib.contenttypes.models import ContentType
3 from django.core.exceptions import ValidationError
4 from django.db import transaction
5 from django.db.models import Q
6 from django.forms import ModelForm
7 from django.utils.translation import ugettext_lazy as _
8
9 from sapl.base.models import Autor, TipoAutor
10 from sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,
11 Participacao, Reuniao)
12 from sapl.parlamentares.models import Legislatura, Mandato, Parlamentar
13
14 class ComposicaoForm(forms.ModelForm):
15
16 class Meta:
17 model = Composicao
18 exclude = []
19
20 def clean(self):
21 cleaned_data = super(ComposicaoForm, self).clean()
22
23 if not self.is_valid():
24 return cleaned_data
25
26 periodo = cleaned_data['periodo']
27 comissao_pk = cleaned_data['comissao'].id
28 intersecao_periodo = Composicao.objects.filter(
29 Q(periodo__data_inicio__lte=periodo.data_fim,
30 periodo__data_fim__gte=periodo.data_fim) |
31 Q(periodo__data_inicio__gte=periodo.data_inicio,
32 periodo__data_fim__lte=periodo.data_inicio),
33 comissao_id=comissao_pk)
34
35 if intersecao_periodo:
36 raise ValidationError('O período informado '
37 'choca com períodos já '
38 'cadastrados para esta comissão')
39
40 return cleaned_data
41
42 class ParticipacaoCreateForm(forms.ModelForm):
43
44 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
45
46 class Meta:
47 model = Participacao
48 fields = '__all__'
49 exclude = ['composicao']
50
51 def __init__(self, user=None, **kwargs):
52 super(ParticipacaoCreateForm, self).__init__(**kwargs)
53
54 if self.instance:
55 comissao = kwargs['initial']
56 comissao_pk = int(comissao['parent_pk'])
57 composicao = Composicao.objects.get(id=comissao_pk)
58 participantes = composicao.participacao_set.all()
59 id_part = [p.parlamentar.id for p in participantes]
60 else:
61 id_part = []
62
63 qs = self.create_participacao()
64
65 parlamentares = Mandato.objects.filter(qs,
66 parlamentar__ativo=True
67 ).prefetch_related('parlamentar').\
68 values_list('parlamentar',
69 flat=True).distinct()
70
71 qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\
72 exclude(id__in=id_part)
73 eligible = self.verifica()
74 result = list(set(qs) & set(eligible))
75 if not cmp(result, eligible): # se igual a 0 significa que o qs e o eli são iguais!
76 self.fields['parlamentar'].queryset = qs
77 else:
78 ids = [e.id for e in eligible]
79 qs = Parlamentar.objects.filter(id__in=ids)
80 self.fields['parlamentar'].queryset = qs
81
82
83 def clean(self):
84 cleaned_data = super(ParticipacaoCreateForm, self).clean()
85
86 if not self.is_valid():
87 return cleaned_data
88
89 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
90 cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]
91
92 if cleaned_data['cargo'].nome in cargos_unicos:
93 msg = _('Este cargo é único para esta Comissão.')
94 raise ValidationError(msg)
95
96
97 def create_participacao(self):
98 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
99 data_inicio_comissao = composicao.periodo.data_inicio
100 data_fim_comissao = composicao.periodo.data_fim
101 q1 = Q(data_fim_mandato__isnull=False,
102 data_fim_mandato__gte=data_inicio_comissao)
103 q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \
104 & Q(data_inicio_mandato__lte=data_fim_comissao)
105 q3 = Q(data_fim_mandato__isnull=True,
106 data_inicio_mandato__lte=data_inicio_comissao)
107 qs = q1 | q2 | q3
108 return qs
109
110 def verifica(self):
111 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
112 participantes = composicao.participacao_set.all()
113 participantes_id = [p.parlamentar.id for p in participantes]
114 parlamentares = Parlamentar.objects.all().exclude(
115 id__in=participantes_id).order_by('nome_completo')
116 parlamentares = [p for p in parlamentares if p.ativo]
117
118 lista = []
119
120 for p in parlamentares:
121 mandatos = p.mandato_set.all()
122 for m in mandatos:
123 data_inicio = m.data_inicio_mandato
124 data_fim = m.data_fim_mandato
125 comp_data_inicio = composicao.periodo.data_inicio
126 comp_data_fim = composicao.periodo.data_fim
127 if (data_fim and data_fim >= comp_data_inicio)\
128 or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\
129 or (data_fim is None and data_inicio <= comp_data_inicio):
130 lista.append(p)
131
132 lista = list(set(lista))
133
134 return lista
135
136
137 class ParticipacaoEditForm(forms.ModelForm):
138
139 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
140 nome_parlamentar = forms.CharField(required=False, label='Parlamentar')
141
142 class Meta:
143 model = Participacao
144 fields = ['nome_parlamentar', 'parlamentar', 'cargo', 'titular',
145 'data_designacao', 'data_desligamento',
146 'motivo_desligamento', 'observacao']
147 widgets = {
148 'parlamentar': forms.HiddenInput(),
149 }
150
151 def __init__(self, user=None, **kwargs):
152 super(ParticipacaoEditForm, self).__init__(**kwargs)
153 self.initial['nome_parlamentar'] = Parlamentar.objects.get(
154 id=self.initial['parlamentar']).nome_parlamentar
155 self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'
156
157
158 class ComissaoForm(forms.ModelForm):
159
160 class Meta:
161 model = Comissao
162 fields = '__all__'
163
164 def __init__(self, user=None, **kwargs):
165 super(ComissaoForm, self).__init__(**kwargs)
166 inst = self.instance
167 if inst.pk:
168 if inst.tipo.natureza == 'P':
169 self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'
170 self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'
171 self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'
172 self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'
173 self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'
174
175
176
177 def clean(self):
178 super(ComissaoForm, self).clean()
179
180 if not self.is_valid():
181 return self.cleaned_data
182
183 if self.cleaned_data['data_extincao']:
184 if (self.cleaned_data['data_extincao'] <
185 self.cleaned_data['data_criacao']):
186 msg = _('Data de extinção não pode ser menor que a de criação')
187 raise ValidationError(msg)
188 return self.cleaned_data
189
190 @transaction.atomic
191 def save(self, commit=True):
192 inst = self.instance
193 if not inst.pk:
194 comissao = super(ComissaoForm, self).save(commit)
195 content_type = ContentType.objects.get_for_model(Comissao)
196 object_id = comissao.pk
197 tipo = TipoAutor.objects.get(descricao__icontains='Comiss')
198 nome = comissao.sigla + ' - ' + comissao.nome
199 Autor.objects.create(
200 content_type=content_type,
201 object_id=object_id,
202 tipo=tipo,
203 nome=nome
204 )
205 return comissao
206 else:
207 comissao = super(ComissaoForm, self).save(commit)
208 return comissao
209
210
211 class ReuniaoForm(ModelForm):
212
213 comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),
214 widget=forms.HiddenInput())
215
216 class Meta:
217 model = Reuniao
218 exclude = ['cod_andamento_reuniao']
219
220 def clean(self):
221 super(ReuniaoForm, self).clean()
222
223 if not self.is_valid():
224 return self.cleaned_data
225
226 if self.cleaned_data['hora_fim']:
227 if (self.cleaned_data['hora_fim'] <
228 self.cleaned_data['hora_inicio']):
229 msg = _('A hora de término da reunião não pode ser menor que a de início')
230 raise ValidationError(msg)
231 return self.cleaned_data
232
233 class DocumentoAcessorioCreateForm(forms.ModelForm):
234
235 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
236
237 class Meta:
238 model = DocumentoAcessorio
239 exclude = ['reuniao']
240
241 def __init__(self, user=None, **kwargs):
242 super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)
243
244 if self.instance:
245 reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])
246 comissao = reuniao.comissao
247 comissao_pk = comissao.id
248 documentos = reuniao.documentoacessorio_set.all()
249 return self.create_documentoacessorio()
250
251
252 def create_documentoacessorio(self):
253 reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])
254
255
256 class DocumentoAcessorioEditForm(forms.ModelForm):
257
258 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
259
260 class Meta:
261 model = DocumentoAcessorio
262 fields = ['nome', 'data', 'autor', 'ementa',
263 'indexacao', 'arquivo']
264
265 def __init__(self, user=None, **kwargs):
266 super(DocumentoAcessorioEditForm, self).__init__(**kwargs)
267
[end of sapl/comissoes/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sapl/comissoes/forms.py b/sapl/comissoes/forms.py
--- a/sapl/comissoes/forms.py
+++ b/sapl/comissoes/forms.py
@@ -65,8 +65,9 @@
parlamentares = Mandato.objects.filter(qs,
parlamentar__ativo=True
).prefetch_related('parlamentar').\
- values_list('parlamentar',
- flat=True).distinct()
+ values_list('parlamentar',
+ flat=True
+ ).distinct()
qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\
exclude(id__in=id_part)
@@ -86,12 +87,21 @@
if not self.is_valid():
return cleaned_data
+ data_designacao = cleaned_data['data_designacao']
+ data_desligamento = cleaned_data['data_desligamento']
+
+ if data_desligamento and \
+ data_designacao > data_desligamento:
+ raise ValidationError(_('Data de designação não pode ser superior '
+ 'à data de desligamento'))
+
composicao = Composicao.objects.get(id=self.initial['parent_pk'])
cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]
if cleaned_data['cargo'].nome in cargos_unicos:
msg = _('Este cargo é único para esta Comissão.')
raise ValidationError(msg)
+ return cleaned_data
def create_participacao(self):
@@ -154,6 +164,31 @@
id=self.initial['parlamentar']).nome_parlamentar
self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'
+ def clean(self):
+ cleaned_data = super(ParticipacaoEditForm, self).clean()
+
+ if not self.is_valid():
+ return cleaned_data
+
+ data_designacao = cleaned_data['data_designacao']
+ data_desligamento = cleaned_data['data_desligamento']
+
+ if data_desligamento and \
+ data_designacao > data_desligamento:
+ raise ValidationError(_('Data de designação não pode ser superior '
+ 'à data de desligamento'))
+
+ composicao_id = self.instance.composicao_id
+
+ composicao = Composicao.objects.get(id=composicao_id)
+ cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]
+
+ if cleaned_data['cargo'].nome in cargos_unicos:
+ msg = _('Este cargo é único para esta Comissão.')
+ raise ValidationError(msg)
+
+ return cleaned_data
+
class ComissaoForm(forms.ModelForm):
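For reference, a minimal standalone sketch of the designation/termination date rule that this patch adds to both participation forms. The field names and error message mirror the diff above; the simplified form below is illustrative only and is not part of the SAPL code base.

```python
# Illustrative only: a stripped-down form reproducing the designation /
# termination date rule added by the patch; it is not part of SAPL itself.
from django import forms
from django.core.exceptions import ValidationError


class ParticipationDatesForm(forms.Form):
    data_designacao = forms.DateField()
    data_desligamento = forms.DateField(required=False)

    def clean(self):
        cleaned_data = super().clean()
        designacao = cleaned_data.get("data_designacao")
        desligamento = cleaned_data.get("data_desligamento")
        if designacao and desligamento and designacao > desligamento:
            # Same rule as the patch: termination cannot precede designation.
            raise ValidationError(
                "Data de designação não pode ser superior à data de desligamento"
            )
        return cleaned_data


# Example: ParticipationDatesForm(data={"data_designacao": "2020-02-01",
#                                       "data_desligamento": "2020-01-01"})
# .is_valid() returns False, with the error above in non_field_errors().
```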
|
{"golden_diff": "diff --git a/sapl/comissoes/forms.py b/sapl/comissoes/forms.py\n--- a/sapl/comissoes/forms.py\n+++ b/sapl/comissoes/forms.py\n@@ -65,8 +65,9 @@\n parlamentares = Mandato.objects.filter(qs,\n parlamentar__ativo=True\n ).prefetch_related('parlamentar').\\\n- values_list('parlamentar',\n- flat=True).distinct()\n+ values_list('parlamentar',\n+ flat=True\n+ ).distinct()\n \n qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\\\n exclude(id__in=id_part)\n@@ -86,12 +87,21 @@\n if not self.is_valid():\n return cleaned_data\n \n+ data_designacao = cleaned_data['data_designacao']\n+ data_desligamento = cleaned_data['data_desligamento']\n+\n+ if data_desligamento and \\\n+ data_designacao > data_desligamento:\n+ raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n+ '\u00e0 data de desligamento'))\n+\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n \n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n+ return cleaned_data\n \n \n def create_participacao(self):\n@@ -154,6 +164,31 @@\n id=self.initial['parlamentar']).nome_parlamentar\n self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'\n \n+ def clean(self):\n+ cleaned_data = super(ParticipacaoEditForm, self).clean()\n+\n+ if not self.is_valid():\n+ return cleaned_data\n+\n+ data_designacao = cleaned_data['data_designacao']\n+ data_desligamento = cleaned_data['data_desligamento']\n+\n+ if data_desligamento and \\\n+ data_designacao > data_desligamento:\n+ raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n+ '\u00e0 data de desligamento'))\n+\n+ composicao_id = self.instance.composicao_id\n+\n+ composicao = Composicao.objects.get(id=composicao_id)\n+ cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n+\n+ if cleaned_data['cargo'].nome in cargos_unicos:\n+ msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n+ raise ValidationError(msg)\n+\n+ return cleaned_data\n+\n \n class ComissaoForm(forms.ModelForm):\n", "issue": "Participante de Comiss\u00e3o - data de desligamento\na data de desligamento deve ser posterior \u00e0 data da designa\u00e7\u00e3o.\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sapl.base.models import Autor, TipoAutor\nfrom sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,\n Participacao, Reuniao)\nfrom sapl.parlamentares.models import Legislatura, Mandato, Parlamentar\n\nclass ComposicaoForm(forms.ModelForm):\n\n class Meta:\n model = Composicao\n exclude = []\n\n def clean(self):\n cleaned_data = super(ComposicaoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n periodo = cleaned_data['periodo']\n comissao_pk = cleaned_data['comissao'].id\n intersecao_periodo = Composicao.objects.filter(\n Q(periodo__data_inicio__lte=periodo.data_fim,\n periodo__data_fim__gte=periodo.data_fim) |\n Q(periodo__data_inicio__gte=periodo.data_inicio,\n periodo__data_fim__lte=periodo.data_inicio),\n comissao_id=comissao_pk)\n\n if 
intersecao_periodo:\n raise ValidationError('O per\u00edodo informado '\n 'choca com per\u00edodos j\u00e1 '\n 'cadastrados para esta comiss\u00e3o')\n\n return cleaned_data\n\nclass ParticipacaoCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = Participacao\n fields = '__all__'\n exclude = ['composicao']\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n comissao = kwargs['initial']\n comissao_pk = int(comissao['parent_pk'])\n composicao = Composicao.objects.get(id=comissao_pk)\n participantes = composicao.participacao_set.all()\n id_part = [p.parlamentar.id for p in participantes]\n else:\n id_part = []\n\n qs = self.create_participacao()\n\n parlamentares = Mandato.objects.filter(qs,\n parlamentar__ativo=True\n ).prefetch_related('parlamentar').\\\n values_list('parlamentar',\n flat=True).distinct()\n\n qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\\\n exclude(id__in=id_part)\n eligible = self.verifica()\n result = list(set(qs) & set(eligible))\n if not cmp(result, eligible): # se igual a 0 significa que o qs e o eli s\u00e3o iguais!\n self.fields['parlamentar'].queryset = qs\n else:\n ids = [e.id for e in eligible]\n qs = Parlamentar.objects.filter(id__in=ids)\n self.fields['parlamentar'].queryset = qs\n\n\n def clean(self):\n cleaned_data = super(ParticipacaoCreateForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n\n\n def create_participacao(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n data_inicio_comissao = composicao.periodo.data_inicio\n data_fim_comissao = composicao.periodo.data_fim\n q1 = Q(data_fim_mandato__isnull=False,\n data_fim_mandato__gte=data_inicio_comissao)\n q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \\\n & Q(data_inicio_mandato__lte=data_fim_comissao)\n q3 = Q(data_fim_mandato__isnull=True,\n data_inicio_mandato__lte=data_inicio_comissao)\n qs = q1 | q2 | q3\n return qs\n\n def verifica(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n participantes = composicao.participacao_set.all()\n participantes_id = [p.parlamentar.id for p in participantes]\n parlamentares = Parlamentar.objects.all().exclude(\n id__in=participantes_id).order_by('nome_completo')\n parlamentares = [p for p in parlamentares if p.ativo]\n\n lista = []\n\n for p in parlamentares:\n mandatos = p.mandato_set.all()\n for m in mandatos:\n data_inicio = m.data_inicio_mandato\n data_fim = m.data_fim_mandato\n comp_data_inicio = composicao.periodo.data_inicio\n comp_data_fim = composicao.periodo.data_fim\n if (data_fim and data_fim >= comp_data_inicio)\\\n or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\\\n or (data_fim is None and data_inicio <= comp_data_inicio):\n lista.append(p)\n\n lista = list(set(lista))\n\n return lista\n\n\nclass ParticipacaoEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n nome_parlamentar = forms.CharField(required=False, label='Parlamentar')\n\n class Meta:\n model = Participacao\n fields = ['nome_parlamentar', 'parlamentar', 
'cargo', 'titular',\n 'data_designacao', 'data_desligamento',\n 'motivo_desligamento', 'observacao']\n widgets = {\n 'parlamentar': forms.HiddenInput(),\n }\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoEditForm, self).__init__(**kwargs)\n self.initial['nome_parlamentar'] = Parlamentar.objects.get(\n id=self.initial['parlamentar']).nome_parlamentar\n self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'\n\n\nclass ComissaoForm(forms.ModelForm):\n\n class Meta:\n model = Comissao\n fields = '__all__'\n\n def __init__(self, user=None, **kwargs):\n super(ComissaoForm, self).__init__(**kwargs)\n inst = self.instance\n if inst.pk:\n if inst.tipo.natureza == 'P':\n self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'\n\n\n\n def clean(self):\n super(ComissaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if self.cleaned_data['data_extincao']:\n if (self.cleaned_data['data_extincao'] <\n self.cleaned_data['data_criacao']):\n msg = _('Data de extin\u00e7\u00e3o n\u00e3o pode ser menor que a de cria\u00e7\u00e3o')\n raise ValidationError(msg)\n return self.cleaned_data\n\n @transaction.atomic\n def save(self, commit=True):\n inst = self.instance\n if not inst.pk:\n comissao = super(ComissaoForm, self).save(commit)\n content_type = ContentType.objects.get_for_model(Comissao)\n object_id = comissao.pk\n tipo = TipoAutor.objects.get(descricao__icontains='Comiss')\n nome = comissao.sigla + ' - ' + comissao.nome\n Autor.objects.create(\n content_type=content_type,\n object_id=object_id,\n tipo=tipo,\n nome=nome\n )\n return comissao\n else:\n comissao = super(ComissaoForm, self).save(commit)\n return comissao\n\n\nclass ReuniaoForm(ModelForm):\n\n comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),\n widget=forms.HiddenInput())\n\n class Meta:\n model = Reuniao\n exclude = ['cod_andamento_reuniao']\n\n def clean(self):\n super(ReuniaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if self.cleaned_data['hora_fim']:\n if (self.cleaned_data['hora_fim'] <\n self.cleaned_data['hora_inicio']):\n msg = _('A hora de t\u00e9rmino da reuni\u00e3o n\u00e3o pode ser menor que a de in\u00edcio')\n raise ValidationError(msg)\n return self.cleaned_data\n\nclass DocumentoAcessorioCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n exclude = ['reuniao']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n comissao = reuniao.comissao\n comissao_pk = comissao.id\n documentos = reuniao.documentoacessorio_set.all()\n return self.create_documentoacessorio()\n\n\n def create_documentoacessorio(self):\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n\n\nclass DocumentoAcessorioEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n fields = ['nome', 'data', 'autor', 'ementa',\n 'indexacao', 'arquivo']\n\n def __init__(self, user=None, **kwargs):\n 
super(DocumentoAcessorioEditForm, self).__init__(**kwargs)\n", "path": "sapl/comissoes/forms.py"}]}
| 3,547 | 628 |
gh_patches_debug_38044
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-339
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
since 4.3 can't connect behind proxy
<!--
Thanks for reporting issues of python-telegram-bot!
To make it easier for us to help you please enter detailed information below.
-->
### Steps to reproduce
1. pip3 install python-telegram-bot --upgrade
2. Don't send anything
3. pip3 install python-telegram-bot-4.2.1
4. Works again!
### Expected behaviour
Tell us what should happen
### Actual behaviour
Tell us what happens instead
### Configuration
**Operating System:**
**Version of Python:**
`$ python -V` 3.5
**Version of python-telegram-bot:**
4.3.2, 4.2.1
`$ python -c 'import telegram; print(telegram.__version__)'`
### Logs
Insert logs here (if necessary)
</issue>
<code>
[start of telegram/utils/request.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2016
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains methods to make POST and GET requests"""
20
21 import json
22 import socket
23 import logging
24
25 import certifi
26 import urllib3
27 from urllib3.connection import HTTPConnection
28
29 from telegram import (InputFile, TelegramError)
30 from telegram.error import Unauthorized, NetworkError, TimedOut, BadRequest
31
32 _CON_POOL = None
33 """:type: urllib3.PoolManager"""
34 CON_POOL_SIZE = 1
35
36 logging.getLogger('urllib3').setLevel(logging.WARNING)
37
38
39 def _get_con_pool():
40 global _CON_POOL
41
42 if _CON_POOL is not None:
43 return _CON_POOL
44
45 _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,
46 cert_reqs='CERT_REQUIRED',
47 ca_certs=certifi.where(),
48 socket_options=HTTPConnection.default_socket_options + [
49 (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
50 ])
51 return _CON_POOL
52
53
54 def is_con_pool_initialized():
55 return _CON_POOL is not None
56
57
58 def stop_con_pool():
59 global _CON_POOL
60 if _CON_POOL is not None:
61 _CON_POOL.clear()
62 _CON_POOL = None
63
64
65 def _parse(json_data):
66 """Try and parse the JSON returned from Telegram.
67
68 Returns:
69 dict: A JSON parsed as Python dict with results - on error this dict will be empty.
70
71 """
72 decoded_s = json_data.decode('utf-8')
73 try:
74 data = json.loads(decoded_s)
75 except ValueError:
76 raise TelegramError('Invalid server response')
77
78 if not data.get('ok') and data.get('description'):
79 return data['description']
80
81 return data['result']
82
83
84 def _request_wrapper(*args, **kwargs):
85 """Wraps urllib3 request for handling known exceptions.
86
87 Args:
88 args: unnamed arguments, passed to urllib3 request.
89 kwargs: keyword arguments, passed tp urllib3 request.
90
91 Returns:
92 str: A non-parsed JSON text.
93
94 Raises:
95 TelegramError
96
97 """
98
99 try:
100 resp = _get_con_pool().request(*args, **kwargs)
101 except urllib3.exceptions.TimeoutError as error:
102 raise TimedOut()
103 except urllib3.exceptions.HTTPError as error:
104 # HTTPError must come last as its the base urllib3 exception class
105 # TODO: do something smart here; for now just raise NetworkError
106 raise NetworkError('urllib3 HTTPError {0}'.format(error))
107
108 if 200 <= resp.status <= 299:
109 # 200-299 range are HTTP success statuses
110 return resp.data
111
112 try:
113 message = _parse(resp.data)
114 except ValueError:
115 raise NetworkError('Unknown HTTPError {0}'.format(resp.status))
116
117 if resp.status in (401, 403):
118 raise Unauthorized()
119 elif resp.status == 400:
120 raise BadRequest(repr(message))
121 elif resp.status == 502:
122 raise NetworkError('Bad Gateway')
123 else:
124 raise NetworkError('{0} ({1})'.format(message, resp.status))
125
126
127 def get(url):
128 """Request an URL.
129 Args:
130 url:
131 The web location we want to retrieve.
132
133 Returns:
134 A JSON object.
135
136 """
137 result = _request_wrapper('GET', url)
138
139 return _parse(result)
140
141
142 def post(url, data, timeout=None):
143 """Request an URL.
144 Args:
145 url:
146 The web location we want to retrieve.
147 data:
148 A dict of (str, unicode) key/value pairs.
149 timeout:
150 float. If this value is specified, use it as the definitive timeout (in
151 seconds) for urlopen() operations. [Optional]
152
153 Notes:
154 If neither `timeout` nor `data['timeout']` is specified. The underlying
155 defaults are used.
156
157 Returns:
158 A JSON object.
159
160 """
161 urlopen_kwargs = {}
162
163 if timeout is not None:
164 urlopen_kwargs['timeout'] = timeout
165
166 if InputFile.is_inputfile(data):
167 data = InputFile(data)
168 result = _request_wrapper('POST', url, body=data.to_form(), headers=data.headers)
169 else:
170 data = json.dumps(data)
171 result = _request_wrapper('POST',
172 url,
173 body=data.encode(),
174 headers={'Content-Type': 'application/json'},
175 **urlopen_kwargs)
176
177 return _parse(result)
178
179
180 def download(url, filename):
181 """Download a file by its URL.
182 Args:
183 url:
184 The web location we want to retrieve.
185
186 filename:
187 The filename within the path to download the file.
188
189 """
190 buf = _request_wrapper('GET', url)
191 with open(filename, 'wb') as fobj:
192 fobj.write(buf)
193
[end of telegram/utils/request.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/telegram/utils/request.py b/telegram/utils/request.py
--- a/telegram/utils/request.py
+++ b/telegram/utils/request.py
@@ -19,6 +19,7 @@
"""This module contains methods to make POST and GET requests"""
import json
+import os
import socket
import logging
@@ -31,26 +32,41 @@
_CON_POOL = None
""":type: urllib3.PoolManager"""
+_CON_POOL_PROXY = None
+_CON_POOL_PROXY_KWARGS = {}
CON_POOL_SIZE = 1
logging.getLogger('urllib3').setLevel(logging.WARNING)
def _get_con_pool():
- global _CON_POOL
-
if _CON_POOL is not None:
return _CON_POOL
- _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,
- cert_reqs='CERT_REQUIRED',
- ca_certs=certifi.where(),
- socket_options=HTTPConnection.default_socket_options + [
- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
- ])
+ _init_con_pool()
return _CON_POOL
+def _init_con_pool():
+ global _CON_POOL
+ kwargs = dict(maxsize=CON_POOL_SIZE,
+ cert_reqs='CERT_REQUIRED',
+ ca_certs=certifi.where(),
+ socket_options=HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ])
+ proxy_url = _get_con_pool_proxy()
+ if not proxy_url:
+ mgr = urllib3.PoolManager
+ else:
+ kwargs['proxy_url'] = proxy_url
+ if _CON_POOL_PROXY_KWARGS:
+ kwargs.update(_CON_POOL_PROXY_KWARGS)
+ mgr = urllib3.ProxyManager
+
+ _CON_POOL = mgr(**kwargs)
+
+
def is_con_pool_initialized():
return _CON_POOL is not None
@@ -62,6 +78,47 @@
_CON_POOL = None
+def set_con_pool_proxy(url, **urllib3_kwargs):
+ """Setup connection pool behind a proxy
+
+ Args:
+ url (str): The URL to the proxy server. For example: `http://127.0.0.1:3128`
+ urllib3_kwargs (dict): Arbitrary arguments passed as-is to `urllib3.ProxyManager`
+
+ """
+ global _CON_POOL_PROXY
+ global _CON_POOL_PROXY_KWARGS
+
+ if is_con_pool_initialized():
+ raise TelegramError('conpool already initialized')
+
+ _CON_POOL_PROXY = url
+ _CON_POOL_PROXY_KWARGS = urllib3_kwargs
+
+
+def _get_con_pool_proxy():
+ """Return the user configured proxy according to the following order:
+
+ * proxy configured using `set_con_pool_proxy()`.
+ * proxy set in `HTTPS_PROXY` env. var.
+ * proxy set in `https_proxy` env. var.
+ * None (if no proxy is configured)
+
+ Returns:
+ str | None
+
+ """
+ if _CON_POOL_PROXY:
+ return _CON_POOL_PROXY
+ from_env = os.environ.get('HTTPS_PROXY')
+ if from_env:
+ return from_env
+ from_env = os.environ.get('https_proxy')
+ if from_env:
+ return from_env
+ return None
+
+
def _parse(json_data):
"""Try and parse the JSON returned from Telegram.
|
{"golden_diff": "diff --git a/telegram/utils/request.py b/telegram/utils/request.py\n--- a/telegram/utils/request.py\n+++ b/telegram/utils/request.py\n@@ -19,6 +19,7 @@\n \"\"\"This module contains methods to make POST and GET requests\"\"\"\n \n import json\n+import os\n import socket\n import logging\n \n@@ -31,26 +32,41 @@\n \n _CON_POOL = None\n \"\"\":type: urllib3.PoolManager\"\"\"\n+_CON_POOL_PROXY = None\n+_CON_POOL_PROXY_KWARGS = {}\n CON_POOL_SIZE = 1\n \n logging.getLogger('urllib3').setLevel(logging.WARNING)\n \n \n def _get_con_pool():\n- global _CON_POOL\n-\n if _CON_POOL is not None:\n return _CON_POOL\n \n- _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,\n- cert_reqs='CERT_REQUIRED',\n- ca_certs=certifi.where(),\n- socket_options=HTTPConnection.default_socket_options + [\n- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n- ])\n+ _init_con_pool()\n return _CON_POOL\n \n \n+def _init_con_pool():\n+ global _CON_POOL\n+ kwargs = dict(maxsize=CON_POOL_SIZE,\n+ cert_reqs='CERT_REQUIRED',\n+ ca_certs=certifi.where(),\n+ socket_options=HTTPConnection.default_socket_options + [\n+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n+ ])\n+ proxy_url = _get_con_pool_proxy()\n+ if not proxy_url:\n+ mgr = urllib3.PoolManager\n+ else:\n+ kwargs['proxy_url'] = proxy_url\n+ if _CON_POOL_PROXY_KWARGS:\n+ kwargs.update(_CON_POOL_PROXY_KWARGS)\n+ mgr = urllib3.ProxyManager\n+\n+ _CON_POOL = mgr(**kwargs)\n+\n+\n def is_con_pool_initialized():\n return _CON_POOL is not None\n \n@@ -62,6 +78,47 @@\n _CON_POOL = None\n \n \n+def set_con_pool_proxy(url, **urllib3_kwargs):\n+ \"\"\"Setup connection pool behind a proxy\n+\n+ Args:\n+ url (str): The URL to the proxy server. For example: `http://127.0.0.1:3128`\n+ urllib3_kwargs (dict): Arbitrary arguments passed as-is to `urllib3.ProxyManager`\n+\n+ \"\"\"\n+ global _CON_POOL_PROXY\n+ global _CON_POOL_PROXY_KWARGS\n+\n+ if is_con_pool_initialized():\n+ raise TelegramError('conpool already initialized')\n+\n+ _CON_POOL_PROXY = url\n+ _CON_POOL_PROXY_KWARGS = urllib3_kwargs\n+\n+\n+def _get_con_pool_proxy():\n+ \"\"\"Return the user configured proxy according to the following order:\n+\n+ * proxy configured using `set_con_pool_proxy()`.\n+ * proxy set in `HTTPS_PROXY` env. var.\n+ * proxy set in `https_proxy` env. var.\n+ * None (if no proxy is configured)\n+\n+ Returns:\n+ str | None\n+\n+ \"\"\"\n+ if _CON_POOL_PROXY:\n+ return _CON_POOL_PROXY\n+ from_env = os.environ.get('HTTPS_PROXY')\n+ if from_env:\n+ return from_env\n+ from_env = os.environ.get('https_proxy')\n+ if from_env:\n+ return from_env\n+ return None\n+\n+\n def _parse(json_data):\n \"\"\"Try and parse the JSON returned from Telegram.\n", "issue": "since 4.3 can\u00b4t connect behind proxy\n<!--\nThanks for reporting issues of python-telegram-bot!\nTo make it easier for us to help you please enter detailed information below.\n-->\n### Steps to reproduce\n1. pip3 install python-telegram-bot --upgrade\n2. Don\u00b4t send anything\n3. pip3 install python-telegram-bot-4.2.1\n4. 
Works again!\n### Expected behaviour\n\nTell us what should happen\n### Actual behaviour\n\nTell us what happens instead\n### Configuration\n\n**Operating System:**\n\n**Version of Python:**\n\n`$ python -V` 3.5\n\n**Version of python-telegram-bot:**\n4.3.2, 4.2.1\n`$ python -c 'import telegram; print(telegram.__version__)'`\n### Logs\n\nInsert logs here (if necessary)\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains methods to make POST and GET requests\"\"\"\n\nimport json\nimport socket\nimport logging\n\nimport certifi\nimport urllib3\nfrom urllib3.connection import HTTPConnection\n\nfrom telegram import (InputFile, TelegramError)\nfrom telegram.error import Unauthorized, NetworkError, TimedOut, BadRequest\n\n_CON_POOL = None\n\"\"\":type: urllib3.PoolManager\"\"\"\nCON_POOL_SIZE = 1\n\nlogging.getLogger('urllib3').setLevel(logging.WARNING)\n\n\ndef _get_con_pool():\n global _CON_POOL\n\n if _CON_POOL is not None:\n return _CON_POOL\n\n _CON_POOL = urllib3.PoolManager(maxsize=CON_POOL_SIZE,\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where(),\n socket_options=HTTPConnection.default_socket_options + [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n ])\n return _CON_POOL\n\n\ndef is_con_pool_initialized():\n return _CON_POOL is not None\n\n\ndef stop_con_pool():\n global _CON_POOL\n if _CON_POOL is not None:\n _CON_POOL.clear()\n _CON_POOL = None\n\n\ndef _parse(json_data):\n \"\"\"Try and parse the JSON returned from Telegram.\n\n Returns:\n dict: A JSON parsed as Python dict with results - on error this dict will be empty.\n\n \"\"\"\n decoded_s = json_data.decode('utf-8')\n try:\n data = json.loads(decoded_s)\n except ValueError:\n raise TelegramError('Invalid server response')\n\n if not data.get('ok') and data.get('description'):\n return data['description']\n\n return data['result']\n\n\ndef _request_wrapper(*args, **kwargs):\n \"\"\"Wraps urllib3 request for handling known exceptions.\n\n Args:\n args: unnamed arguments, passed to urllib3 request.\n kwargs: keyword arguments, passed tp urllib3 request.\n\n Returns:\n str: A non-parsed JSON text.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n try:\n resp = _get_con_pool().request(*args, **kwargs)\n except urllib3.exceptions.TimeoutError as error:\n raise TimedOut()\n except urllib3.exceptions.HTTPError as error:\n # HTTPError must come last as its the base urllib3 exception class\n # TODO: do something smart here; for now just raise NetworkError\n raise NetworkError('urllib3 HTTPError {0}'.format(error))\n\n if 200 <= resp.status <= 299:\n # 200-299 range are HTTP success statuses\n return resp.data\n\n try:\n message = _parse(resp.data)\n except ValueError:\n raise NetworkError('Unknown HTTPError 
{0}'.format(resp.status))\n\n if resp.status in (401, 403):\n raise Unauthorized()\n elif resp.status == 400:\n raise BadRequest(repr(message))\n elif resp.status == 502:\n raise NetworkError('Bad Gateway')\n else:\n raise NetworkError('{0} ({1})'.format(message, resp.status))\n\n\ndef get(url):\n \"\"\"Request an URL.\n Args:\n url:\n The web location we want to retrieve.\n\n Returns:\n A JSON object.\n\n \"\"\"\n result = _request_wrapper('GET', url)\n\n return _parse(result)\n\n\ndef post(url, data, timeout=None):\n \"\"\"Request an URL.\n Args:\n url:\n The web location we want to retrieve.\n data:\n A dict of (str, unicode) key/value pairs.\n timeout:\n float. If this value is specified, use it as the definitive timeout (in\n seconds) for urlopen() operations. [Optional]\n\n Notes:\n If neither `timeout` nor `data['timeout']` is specified. The underlying\n defaults are used.\n\n Returns:\n A JSON object.\n\n \"\"\"\n urlopen_kwargs = {}\n\n if timeout is not None:\n urlopen_kwargs['timeout'] = timeout\n\n if InputFile.is_inputfile(data):\n data = InputFile(data)\n result = _request_wrapper('POST', url, body=data.to_form(), headers=data.headers)\n else:\n data = json.dumps(data)\n result = _request_wrapper('POST',\n url,\n body=data.encode(),\n headers={'Content-Type': 'application/json'},\n **urlopen_kwargs)\n\n return _parse(result)\n\n\ndef download(url, filename):\n \"\"\"Download a file by its URL.\n Args:\n url:\n The web location we want to retrieve.\n\n filename:\n The filename within the path to download the file.\n\n \"\"\"\n buf = _request_wrapper('GET', url)\n with open(filename, 'wb') as fobj:\n fobj.write(buf)\n", "path": "telegram/utils/request.py"}]}
| 2,422 | 778 |
gh_patches_debug_30937
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-3097
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add http endpoint for the next event
### Is your feature request related to a problem? Please describe.
We want to use a DNS redirect to redirect to the first upcoming Thalia event.
### Describe the solution you'd like
An endpoint (like /events/next) that redirects to the next Thalia event
### Motivation
We want to set up a domain (sog.je) that redirects to this
### Describe alternatives you've considered
Static HTML host that uses JavaScript, but this would require using additional resources
</issue>
<code>
[start of website/events/views.py]
1 """Views provided by the events package."""
2
3 from django.contrib import messages
4 from django.contrib.auth.decorators import login_required
5 from django.shortcuts import get_object_or_404, redirect
6 from django.utils import timezone
7 from django.utils.decorators import method_decorator
8 from django.utils.translation import gettext_lazy as _
9 from django.views import View
10 from django.views.generic import DetailView, FormView, TemplateView
11
12 from events import services
13 from events.exceptions import RegistrationError
14 from events.models import categories
15 from events.services import is_user_registered
16 from payments.models import Payment
17
18 from .forms import FieldsForm
19 from .models import Event, EventRegistration
20
21
22 class EventIndex(TemplateView):
23 """Render the events calendar overview."""
24
25 template_name = "events/index.html"
26
27 def get_context_data(self, **kwargs):
28 context = super().get_context_data(**kwargs)
29
30 upcoming_activity = (
31 Event.objects.filter(published=True, end__gte=timezone.now())
32 .order_by("end")
33 .first()
34 )
35 context["upcoming_activity"] = upcoming_activity
36
37 return context
38
39
40 class EventDetail(DetailView):
41 """Render a single event detail page."""
42
43 model = Event
44 queryset = Event.objects.filter(published=True)
45 template_name = "events/event.html"
46 context_object_name = "event"
47
48 def get_context_data(self, **kwargs):
49 context = super().get_context_data(**kwargs)
50 context["user"] = self.request.user
51 context["payment_method_tpay"] = Payment.TPAY
52
53 event = context["event"]
54 if event.max_participants:
55 perc = 100.0 * len(event.participants) / event.max_participants
56 context["registration_percentage"] = perc
57
58 try:
59 context["registration"] = EventRegistration.objects.get(
60 event=event, member=self.request.member
61 )
62 except (EventRegistration.DoesNotExist, TypeError):
63 pass
64
65 registration_status = services.registration_status(
66 event, context.get("registration"), self.request.member
67 )
68 context["registration_status"] = services.registration_status_string(
69 registration_status, event, context.get("registration")
70 )
71
72 context["show_cancel_status"] = services.show_cancel_status(registration_status)
73 if context["show_cancel_status"]:
74 cancel_status = services.cancel_status(event, context.get("registration"))
75 context["cancel_info"] = services.cancel_info_string(
76 event, cancel_status, registration_status
77 )
78
79 context["permissions"] = services.event_permissions(self.request.member, event)
80
81 context["date_now"] = timezone.now()
82
83 context["slide_size"] = "slide"
84
85 context["participants"] = event.participants.select_related(
86 "member", "member__profile"
87 )
88
89 return context
90
91
92 class AlumniEventsView(TemplateView):
93 """Render the alumni events page."""
94
95 template_name = "events/alumni.html"
96
97 def get_context_data(self, **kwargs):
98 context = super().get_context_data(**kwargs)
99
100 events = Event.objects.filter(
101 published=True, category=categories.CATEGORY_ALUMNI, end__gte=timezone.now()
102 ).order_by("end")[:3]
103 context["events"] = events
104
105 return context
106
107
108 @method_decorator(login_required, name="dispatch")
109 class EventRegisterView(View):
110 """Define a view that allows the user to register for an event using a POST request.
111
112 The user should be authenticated.
113 """
114
115 def get(self, request, *args, **kwargs):
116 return redirect("events:event", pk=kwargs["pk"])
117
118 def post(self, request, *args, **kwargs):
119 event = get_object_or_404(Event, pk=kwargs["pk"])
120 try:
121 services.create_registration(request.member, event)
122
123 if event.has_fields:
124 return redirect("events:registration", event.pk)
125
126 messages.success(request, _("Registration successful."))
127 except RegistrationError as e:
128 messages.error(request, e)
129
130 return redirect(event)
131
132
133 @method_decorator(login_required, name="dispatch")
134 class EventCancelView(View):
135 """Define a view that allows the user to cancel their event registration using a POST request.
136
137 The user should be authenticated.
138 """
139
140 def get(self, request, *args, **kwargs):
141 return redirect("events:event", pk=kwargs["pk"])
142
143 def post(self, request, *args, **kwargs):
144 event = get_object_or_404(Event, pk=kwargs["pk"])
145 try:
146 services.cancel_registration(request.member, event)
147 messages.success(request, _("Registration successfully cancelled."))
148 except RegistrationError as e:
149 messages.error(request, e)
150
151 return redirect(event)
152
153
154 @method_decorator(login_required, name="dispatch")
155 class RegistrationView(FormView):
156 """Render a form that allows the user to change the details of their registration.
157
158 The user should be authenticated.
159 """
160
161 form_class = FieldsForm
162 template_name = "events/registration.html"
163 event = None
164
165 def get_context_data(self, **kwargs):
166 context = super().get_context_data(**kwargs)
167 context["event"] = self.event
168 return context
169
170 def get_form_kwargs(self):
171 kwargs = super().get_form_kwargs()
172 kwargs["fields"] = services.registration_fields(
173 self.request, self.request.member, self.event
174 )
175 return kwargs
176
177 def form_valid(self, form):
178 values = form.field_values()
179 try:
180 services.update_registration(
181 self.request.member, self.event, field_values=values
182 )
183 messages.success(self.request, _("Registration successfully saved."))
184 return redirect(self.event)
185 except RegistrationError as e:
186 messages.error(self.request, e)
187 return self.render_to_response(self.get_context_data(form=form))
188
189 def dispatch(self, request, *args, **kwargs):
190 self.event = get_object_or_404(Event, pk=self.kwargs["pk"])
191 try:
192 if self.event.has_fields:
193 return super().dispatch(request, *args, **kwargs)
194 except RegistrationError:
195 pass
196 return redirect(self.event)
197
198
199 @method_decorator(login_required, name="dispatch")
200 class MarkPresentView(View):
201 """A view that allows uses to mark their presence at an event using a secret token."""
202
203 def get(self, request, *args, **kwargs):
204 """Mark a user as present.
205
206 Checks if the url is correct, the event has not ended yet, and the user is registered.
207 """
208 event = get_object_or_404(Event, pk=kwargs["pk"])
209 if kwargs["token"] != event.mark_present_url_token:
210 messages.error(request, _("Invalid url."))
211 elif not request.member or not is_user_registered(request.member, event):
212 messages.error(request, _("You are not registered for this event."))
213 else:
214 registration = event.registrations.get(
215 member=request.member, date_cancelled=None
216 )
217
218 if registration.present:
219 messages.info(request, _("You were already marked as present."))
220 elif event.end < timezone.now():
221 messages.error(request, _("This event has already ended."))
222 else:
223 registration.present = True
224 registration.save()
225 messages.success(request, _("You have been marked as present."))
226
227 return redirect(event)
228
[end of website/events/views.py]
[start of website/events/urls.py]
1 """Routes defined by the events package."""
2 from django.urls import include, path
3
4 from events.feeds import EventFeed
5 from events.views import (
6 AlumniEventsView,
7 EventCancelView,
8 EventDetail,
9 EventIndex,
10 EventRegisterView,
11 MarkPresentView,
12 RegistrationView,
13 )
14
15 app_name = "events"
16
17 urlpatterns = [
18 path(
19 "events/",
20 include(
21 [
22 path("<int:pk>/", EventDetail.as_view(), name="event"),
23 path("<slug:slug>/", EventDetail.as_view(), name="event"),
24 path(
25 "<int:pk>/registration/register/",
26 EventRegisterView.as_view(),
27 name="register",
28 ),
29 path(
30 "<int:pk>/registration/cancel/",
31 EventCancelView.as_view(),
32 name="cancel",
33 ),
34 path(
35 "<int:pk>/mark-present/<uuid:token>/",
36 MarkPresentView.as_view(),
37 name="mark-present",
38 ),
39 path(
40 "<int:pk>/registration/",
41 RegistrationView.as_view(),
42 name="registration",
43 ),
44 path("", EventIndex.as_view(), name="index"),
45 path("ical/nl.ics", EventFeed(), name="ical-nl"),
46 path("ical/en.ics", EventFeed(), name="ical-en"),
47 ]
48 ),
49 ),
50 path("association/alumni/", AlumniEventsView.as_view(), name="alumni"),
51 ]
52
[end of website/events/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/events/urls.py b/website/events/urls.py
--- a/website/events/urls.py
+++ b/website/events/urls.py
@@ -9,6 +9,7 @@
EventIndex,
EventRegisterView,
MarkPresentView,
+ NextEventView,
RegistrationView,
)
@@ -20,6 +21,7 @@
include(
[
path("<int:pk>/", EventDetail.as_view(), name="event"),
+ path("next/", NextEventView.as_view(), name="next"),
path("<slug:slug>/", EventDetail.as_view(), name="event"),
path(
"<int:pk>/registration/register/",
diff --git a/website/events/views.py b/website/events/views.py
--- a/website/events/views.py
+++ b/website/events/views.py
@@ -2,6 +2,7 @@
from django.contrib import messages
from django.contrib.auth.decorators import login_required
+from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.utils import timezone
from django.utils.decorators import method_decorator
@@ -225,3 +226,20 @@
messages.success(request, _("You have been marked as present."))
return redirect(event)
+
+
+class NextEventView(View):
+ def get(self, request, *args, **kwargs):
+ """HTTP redirect to the next event.
+
+ Checks if there is an upcoming event. Raise a 404 if none exists.
+ """
+ upcoming_activity = (
+ Event.objects.filter(published=True, end__gte=timezone.now())
+ .order_by("end")
+ .first()
+ )
+ if not upcoming_activity:
+ raise Http404("There is no upcoming event.")
+
+ return redirect(upcoming_activity)
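For reference, a hypothetical Django test sketch exercising the new `events:next` route. The route name and the redirect-or-404 behaviour follow from the diff above; the `Event.objects.create()` call is schematic, as the real model likely requires more fields than shown here.

```python
# Hypothetical test sketch; only the "events:next" route and the
# redirect-or-404 behaviour are taken from the patch itself.
from datetime import timedelta

from django.test import TestCase
from django.urls import reverse
from django.utils import timezone

from events.models import Event


class NextEventViewTest(TestCase):
    def test_returns_404_without_upcoming_events(self):
        response = self.client.get(reverse("events:next"))
        self.assertEqual(response.status_code, 404)

    def test_redirects_to_first_upcoming_event(self):
        event = Event.objects.create(
            title="Example event",                     # assumed field
            start=timezone.now() + timedelta(days=1),  # assumed field
            end=timezone.now() + timedelta(days=1, hours=2),
            published=True,
        )
        response = self.client.get(reverse("events:next"))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, event.get_absolute_url())
```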
|
{"golden_diff": "diff --git a/website/events/urls.py b/website/events/urls.py\n--- a/website/events/urls.py\n+++ b/website/events/urls.py\n@@ -9,6 +9,7 @@\n EventIndex,\n EventRegisterView,\n MarkPresentView,\n+ NextEventView,\n RegistrationView,\n )\n \n@@ -20,6 +21,7 @@\n include(\n [\n path(\"<int:pk>/\", EventDetail.as_view(), name=\"event\"),\n+ path(\"next/\", NextEventView.as_view(), name=\"next\"),\n path(\"<slug:slug>/\", EventDetail.as_view(), name=\"event\"),\n path(\n \"<int:pk>/registration/register/\",\ndiff --git a/website/events/views.py b/website/events/views.py\n--- a/website/events/views.py\n+++ b/website/events/views.py\n@@ -2,6 +2,7 @@\n \n from django.contrib import messages\n from django.contrib.auth.decorators import login_required\n+from django.http import Http404\n from django.shortcuts import get_object_or_404, redirect\n from django.utils import timezone\n from django.utils.decorators import method_decorator\n@@ -225,3 +226,20 @@\n messages.success(request, _(\"You have been marked as present.\"))\n \n return redirect(event)\n+\n+\n+class NextEventView(View):\n+ def get(self, request, *args, **kwargs):\n+ \"\"\"HTTP redirect to the next event.\n+\n+ Checks if there is an upcoming event. Raise a 404 if none exists.\n+ \"\"\"\n+ upcoming_activity = (\n+ Event.objects.filter(published=True, end__gte=timezone.now())\n+ .order_by(\"end\")\n+ .first()\n+ )\n+ if not upcoming_activity:\n+ raise Http404(\"There is no upcoming event.\")\n+\n+ return redirect(upcoming_activity)\n", "issue": "Add http endpoint for the next event \n### Is your feature request related to a problem? Please describe.\r\nWe want to use a dns redirect to redirect to the first upcoming Thalia event. \r\n\r\n### Describe the solution you'd like\r\nAn endpoint (like /events/next) that redicects to the next Thalia event \r\n### Motivation\r\nWe want to setup a domain (sog.je) that redirects to this \r\n\r\n### Describe alternatives you've considered\r\nStatic html host that uses javascriot, but this would require using additional resources \n", "before_files": [{"content": "\"\"\"Views provided by the events package.\"\"\"\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import DetailView, FormView, TemplateView\n\nfrom events import services\nfrom events.exceptions import RegistrationError\nfrom events.models import categories\nfrom events.services import is_user_registered\nfrom payments.models import Payment\n\nfrom .forms import FieldsForm\nfrom .models import Event, EventRegistration\n\n\nclass EventIndex(TemplateView):\n \"\"\"Render the events calendar overview.\"\"\"\n\n template_name = \"events/index.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n upcoming_activity = (\n Event.objects.filter(published=True, end__gte=timezone.now())\n .order_by(\"end\")\n .first()\n )\n context[\"upcoming_activity\"] = upcoming_activity\n\n return context\n\n\nclass EventDetail(DetailView):\n \"\"\"Render a single event detail page.\"\"\"\n\n model = Event\n queryset = Event.objects.filter(published=True)\n template_name = \"events/event.html\"\n context_object_name = \"event\"\n\n def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context[\"user\"] = self.request.user\n context[\"payment_method_tpay\"] = Payment.TPAY\n\n event = context[\"event\"]\n if event.max_participants:\n perc = 100.0 * len(event.participants) / event.max_participants\n context[\"registration_percentage\"] = perc\n\n try:\n context[\"registration\"] = EventRegistration.objects.get(\n event=event, member=self.request.member\n )\n except (EventRegistration.DoesNotExist, TypeError):\n pass\n\n registration_status = services.registration_status(\n event, context.get(\"registration\"), self.request.member\n )\n context[\"registration_status\"] = services.registration_status_string(\n registration_status, event, context.get(\"registration\")\n )\n\n context[\"show_cancel_status\"] = services.show_cancel_status(registration_status)\n if context[\"show_cancel_status\"]:\n cancel_status = services.cancel_status(event, context.get(\"registration\"))\n context[\"cancel_info\"] = services.cancel_info_string(\n event, cancel_status, registration_status\n )\n\n context[\"permissions\"] = services.event_permissions(self.request.member, event)\n\n context[\"date_now\"] = timezone.now()\n\n context[\"slide_size\"] = \"slide\"\n\n context[\"participants\"] = event.participants.select_related(\n \"member\", \"member__profile\"\n )\n\n return context\n\n\nclass AlumniEventsView(TemplateView):\n \"\"\"Render the alumni events page.\"\"\"\n\n template_name = \"events/alumni.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n events = Event.objects.filter(\n published=True, category=categories.CATEGORY_ALUMNI, end__gte=timezone.now()\n ).order_by(\"end\")[:3]\n context[\"events\"] = events\n\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EventRegisterView(View):\n \"\"\"Define a view that allows the user to register for an event using a POST request.\n\n The user should be authenticated.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n return redirect(\"events:event\", pk=kwargs[\"pk\"])\n\n def post(self, request, *args, **kwargs):\n event = get_object_or_404(Event, pk=kwargs[\"pk\"])\n try:\n services.create_registration(request.member, event)\n\n if event.has_fields:\n return redirect(\"events:registration\", event.pk)\n\n messages.success(request, _(\"Registration successful.\"))\n except RegistrationError as e:\n messages.error(request, e)\n\n return redirect(event)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass EventCancelView(View):\n \"\"\"Define a view that allows the user to cancel their event registration using a POST request.\n\n The user should be authenticated.\n \"\"\"\n\n def get(self, request, *args, **kwargs):\n return redirect(\"events:event\", pk=kwargs[\"pk\"])\n\n def post(self, request, *args, **kwargs):\n event = get_object_or_404(Event, pk=kwargs[\"pk\"])\n try:\n services.cancel_registration(request.member, event)\n messages.success(request, _(\"Registration successfully cancelled.\"))\n except RegistrationError as e:\n messages.error(request, e)\n\n return redirect(event)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RegistrationView(FormView):\n \"\"\"Render a form that allows the user to change the details of their registration.\n\n The user should be authenticated.\n \"\"\"\n\n form_class = FieldsForm\n template_name = \"events/registration.html\"\n event = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"event\"] = 
self.event\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"fields\"] = services.registration_fields(\n self.request, self.request.member, self.event\n )\n return kwargs\n\n def form_valid(self, form):\n values = form.field_values()\n try:\n services.update_registration(\n self.request.member, self.event, field_values=values\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n return redirect(self.event)\n except RegistrationError as e:\n messages.error(self.request, e)\n return self.render_to_response(self.get_context_data(form=form))\n\n def dispatch(self, request, *args, **kwargs):\n self.event = get_object_or_404(Event, pk=self.kwargs[\"pk\"])\n try:\n if self.event.has_fields:\n return super().dispatch(request, *args, **kwargs)\n except RegistrationError:\n pass\n return redirect(self.event)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass MarkPresentView(View):\n \"\"\"A view that allows uses to mark their presence at an event using a secret token.\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Mark a user as present.\n\n Checks if the url is correct, the event has not ended yet, and the user is registered.\n \"\"\"\n event = get_object_or_404(Event, pk=kwargs[\"pk\"])\n if kwargs[\"token\"] != event.mark_present_url_token:\n messages.error(request, _(\"Invalid url.\"))\n elif not request.member or not is_user_registered(request.member, event):\n messages.error(request, _(\"You are not registered for this event.\"))\n else:\n registration = event.registrations.get(\n member=request.member, date_cancelled=None\n )\n\n if registration.present:\n messages.info(request, _(\"You were already marked as present.\"))\n elif event.end < timezone.now():\n messages.error(request, _(\"This event has already ended.\"))\n else:\n registration.present = True\n registration.save()\n messages.success(request, _(\"You have been marked as present.\"))\n\n return redirect(event)\n", "path": "website/events/views.py"}, {"content": "\"\"\"Routes defined by the events package.\"\"\"\nfrom django.urls import include, path\n\nfrom events.feeds import EventFeed\nfrom events.views import (\n AlumniEventsView,\n EventCancelView,\n EventDetail,\n EventIndex,\n EventRegisterView,\n MarkPresentView,\n RegistrationView,\n)\n\napp_name = \"events\"\n\nurlpatterns = [\n path(\n \"events/\",\n include(\n [\n path(\"<int:pk>/\", EventDetail.as_view(), name=\"event\"),\n path(\"<slug:slug>/\", EventDetail.as_view(), name=\"event\"),\n path(\n \"<int:pk>/registration/register/\",\n EventRegisterView.as_view(),\n name=\"register\",\n ),\n path(\n \"<int:pk>/registration/cancel/\",\n EventCancelView.as_view(),\n name=\"cancel\",\n ),\n path(\n \"<int:pk>/mark-present/<uuid:token>/\",\n MarkPresentView.as_view(),\n name=\"mark-present\",\n ),\n path(\n \"<int:pk>/registration/\",\n RegistrationView.as_view(),\n name=\"registration\",\n ),\n path(\"\", EventIndex.as_view(), name=\"index\"),\n path(\"ical/nl.ics\", EventFeed(), name=\"ical-nl\"),\n path(\"ical/en.ics\", EventFeed(), name=\"ical-en\"),\n ]\n ),\n ),\n path(\"association/alumni/\", AlumniEventsView.as_view(), name=\"alumni\"),\n]\n", "path": "website/events/urls.py"}]}
| 3,188 | 405 |
gh_patches_debug_17774
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-2580
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Static pages images URL ?
I added a cover_image to a static page on the DEMO server, on this page: https://geotrekdemo.ecrins-parcnational.fr/admin/flatpages/flatpage/3/change/ (admin / admin)
The image URL reported (in the admin site and in the API V2) is https://geotrekdemo.ecrins-parcnational.fr/media/paperclip/flatpages_flatpage/3/biodivecrins.jpg, but it is not accessible.
The image is indeed present on my server under /opt/geotrek-admin/var/media/paperclip/flatpages_flatpage/3/
</issue>
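For context, the 404 comes from the permission guard in `serve_attachment` (see `mapentity/views/base.py` below). A minimal sketch of the failing path — `why_404` is a hypothetical helper name, the other names are taken from that file:

```python
# Sketch only: FlatPage cover images are served through serve_attachment,
# whose guard only accepts models inheriting MapEntityMixin.
from django.http import Http404
from mapentity import models as mapentity_models

def why_404(attachment):
    obj = attachment.content_object  # here: the FlatPage with pk=3
    if not issubclass(obj._meta.model, mapentity_models.MapEntityMixin):
        # FlatPage does not inherit MapEntityMixin, so the image URL 404s
        raise Http404
```

The golden diff further down relaxes this check to `hasattr(obj._meta.model, 'attachments')` and gives `FlatPage` an `is_public()` method.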
<code>
[start of mapentity/views/base.py]
1 from __future__ import unicode_literals
2
3 from io import BytesIO
4 import json
5 import logging
6 import mimetypes
7 import os
8 from datetime import datetime
9 import re
10 from urllib.parse import quote
11
12 from django.conf import settings
13 from django.contrib.auth.decorators import login_required
14 from django.contrib.gis.db.models import GeometryField
15 from django.core.exceptions import PermissionDenied
16 from django.urls import reverse
17 from django.http import (HttpResponse, HttpResponseBadRequest, Http404)
18 from django.shortcuts import get_object_or_404
19 from django.views import static
20 from django.views.decorators.csrf import csrf_exempt
21 from django.views.decorators.http import require_http_methods
22 from django.views.generic.base import TemplateView
23 from paperclip.settings import get_attachment_permission, get_attachment_model
24
25 from mapentity import models as mapentity_models
26 from .mixins import JSONResponseMixin, FilterListMixin, ModelViewMixin
27 from ..decorators import view_permission_required
28 from ..helpers import capture_image
29 from ..settings import app_settings
30
31 logger = logging.getLogger(__name__)
32
33
34 def serve_attachment(request, path):
35 """
36 Serve media/ for authorized users only, since it can contain sensitive
37 information (uploaded documents)
38 """
39 original_path = re.sub(r'\.\d+x\d+_q\d+(_crop)?\.(jpg|png|jpeg)$', '', path, count=1, flags=re.IGNORECASE)
40 attachment = get_object_or_404(get_attachment_model(), attachment_file=original_path)
41 obj = attachment.content_object
42 if not issubclass(obj._meta.model, mapentity_models.MapEntityMixin):
43 raise Http404
44 if not obj.is_public():
45 if not request.user.is_authenticated:
46 raise PermissionDenied
47 if not request.user.has_perm(get_attachment_permission('read_attachment')):
48 raise PermissionDenied
49 if not request.user.has_perm('{}.read_{}'.format(obj._meta.app_label, obj._meta.model_name)):
50 raise PermissionDenied
51
52 content_type, encoding = mimetypes.guess_type(path)
53
54 if settings.DEBUG:
55 response = static.serve(request, path, settings.MEDIA_ROOT)
56 else:
57 response = HttpResponse()
58 response[app_settings['SENDFILE_HTTP_HEADER']] = os.path.join(settings.MEDIA_URL_SECURE, path)
59 response["Content-Type"] = content_type or 'application/octet-stream'
60 if encoding:
61 response["Content-Encoding"] = encoding
62 if app_settings['SERVE_MEDIA_AS_ATTACHMENT']:
63 response['Content-Disposition'] = "attachment; filename={0}".format(
64 os.path.basename(path))
65 return response
66
67
68 class JSSettings(JSONResponseMixin, TemplateView):
69 """
70 Javascript settings, in JSON format.
71 Likely to be overriden. Contains only necessary stuff
72 for mapentity.
73 """
74
75 def get_context_data(self):
76 dictsettings = {}
77 dictsettings['debug'] = settings.DEBUG
78 dictsettings['map'] = dict(
79 extent=getattr(settings, 'LEAFLET_CONFIG', {}).get('SPATIAL_EXTENT'),
80 styles=app_settings['MAP_STYLES'],
81 )
82
83 # URLs
84 root_url = app_settings['ROOT_URL']
85 root_url = root_url if root_url.endswith('/') else '{}/'.format(root_url)
86 dictsettings['urls'] = {}
87 dictsettings['urls']['root'] = root_url
88
89 from django.db import models
90 from mapentity.registry import MapEntityOptions
91
92 class ModelName(mapentity_models.MapEntityMixin, models.Model):
93 pass
94
95 options = MapEntityOptions(ModelName)
96
97 dictsettings['urls']['static'] = settings.STATIC_URL
98 dictsettings['urls']['layer'] = '{}{}'.format(root_url, options._url_path(mapentity_models.ENTITY_LAYER)[1:-1])
99 dictsettings['urls']['detail'] = '{}modelname/0/'.format(root_url)
100 dictsettings['urls']['format_list'] = '{}{}'.format(
101 root_url, options._url_path(mapentity_models.ENTITY_FORMAT_LIST)[1:-1]
102 )
103 dictsettings['urls']['screenshot'] = reverse("mapentity:map_screenshot")
104
105 # Useful for JS calendars
106 date_format = settings.DATE_INPUT_FORMATS[0].replace('%Y', 'yyyy').replace('%m', 'mm').replace('%d', 'dd')
107 dictsettings['date_format'] = date_format
108 # Languages
109 dictsettings['languages'] = dict(available=dict(app_settings['TRANSLATED_LANGUAGES']),
110 default=app_settings['LANGUAGE_CODE'])
111 return dictsettings
112
113
114 class BaseListView(FilterListMixin, ModelViewMixin):
115 columns = None
116
117 def __init__(self, *args, **kwargs):
118 super(BaseListView, self).__init__(*args, **kwargs)
119
120 if self.columns is None:
121 # All model fields except geometries
122 self.columns = [field.name for field in self.get_model()._meta.fields
123 if not isinstance(field, GeometryField)]
124 # Id column should be the first one
125 self.columns.remove('id')
126 self.columns.insert(0, 'id')
127
128 @view_permission_required()
129 def dispatch(self, *args, **kwargs):
130 return super(BaseListView, self).dispatch(*args, **kwargs)
131
132
133 @csrf_exempt
134 @login_required
135 def map_screenshot(request):
136 """
137 This view allows to take screenshots, via a django-screamshot service, of
138 the map **currently viewed by the user**.
139
140 - A context full of information is built on client-side and posted here.
141 - We reproduce this context, via headless browser, and take a capture
142 - We return the resulting image as attachment.
143
144 This seems overkill ? Please look around and find a better way.
145 """
146 try:
147 printcontext = request.POST['printcontext']
148 assert len(printcontext) < 2048, "Print context is way too big."
149
150 # Prepare context, extract and add infos
151 context = json.loads(printcontext)
152 selector = context.pop('selector')
153 map_url = context.pop('url')
154 map_url = request.build_absolute_uri(map_url)
155 context['print'] = True
156 printcontext = json.dumps(context)
157 contextencoded = quote(printcontext)
158 map_url += '?context=%s' % contextencoded
159 logger.debug("Capture %s" % map_url)
160
161 # Capture image and return it
162 width = context.get('viewport', {}).get('width')
163 height = context.get('viewport', {}).get('height')
164
165 stream = BytesIO()
166 capture_image(map_url, stream, width=width, height=height, selector=selector)
167 response = HttpResponse(stream.getvalue(), content_type='image/png')
168 response['Content-Disposition'] = 'attachment; filename=%s.png' % datetime.now().strftime('%Y%m%d-%H%M%S')
169 return response
170
171 except Exception as exc:
172 logger.exception(exc)
173 return HttpResponseBadRequest(exc)
174
175
176 @require_http_methods(["POST"])
177 @csrf_exempt
178 @login_required
179 def history_delete(request, path=None):
180 path = request.POST.get('path', path)
181 if path:
182 history = request.session.get('history')
183 if history:
184 history = [h for h in history if h['path'] != path]
185 request.session['history'] = history
186 return HttpResponse()
187
[end of mapentity/views/base.py]
[start of geotrek/flatpages/models.py]
1 import mimetypes
2
3 from django.contrib.contenttypes.fields import GenericRelation
4 from django.db import models
5 from django.utils.translation import gettext_lazy as _
6 from django.template.defaultfilters import slugify
7 from django.conf import settings
8 from django.urls import reverse
9
10 from bs4 import BeautifulSoup
11 from extended_choices import Choices
12
13 from mapentity.serializers import plain_text
14 from geotrek.common.mixins import TimeStampedModelMixin, BasePublishableMixin
15
16
17 FLATPAGES_TARGETS = Choices(
18 ('ALL', 'all', _('All')),
19 ('MOBILE', 'mobile', _('Mobile')),
20 ('HIDDEN', 'hidden', _('Hidden')),
21 ('WEB', 'web', _('Web')),
22 )
23
24
25 class FlatPage(BasePublishableMixin, TimeStampedModelMixin):
26 """
27 Manage *Geotrek-rando* static pages from Geotrek admin.
28
29 Historically, we started static pages as static HTML files within
30 *Geotrek-rando* folders.
31 """
32 title = models.CharField(verbose_name=_('Title'), max_length=200)
33 external_url = models.URLField(verbose_name=_('External URL'), blank=True, default='',
34 help_text=_('Link to external website instead of HTML content'))
35 content = models.TextField(verbose_name=_('Content'), null=True, blank=True,
36 help_text=_('HTML content'))
37 target = models.CharField(verbose_name=_('Target'), max_length=12, choices=FLATPAGES_TARGETS,
38 default=FLATPAGES_TARGETS.ALL)
39 source = models.ManyToManyField('common.RecordSource',
40 blank=True, related_name='flatpages',
41 verbose_name=_("Source"))
42 portal = models.ManyToManyField('common.TargetPortal',
43 blank=True, related_name='flatpages',
44 verbose_name=_("Portal"))
45 order = models.IntegerField(default=None, null=True, blank=True,
46 help_text=_("ID order if blank", ),
47 verbose_name=_("Order"))
48 attachments = GenericRelation(settings.PAPERCLIP_ATTACHMENT_MODEL)
49
50 @property
51 def slug(self):
52 return slugify(self.title)
53
54 class Meta:
55 verbose_name = _('Flat page')
56 verbose_name_plural = _('Flat pages')
57 ordering = ['order', 'id']
58 permissions = (
59 ("read_flatpage", "Can read FlatPage"),
60 )
61
62 def __str__(self):
63 return self.title
64
65 def get_permission_codename(self, *args):
66 return
67
68 def clean(self):
69 html_content = ''
70 for language in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:
71 html_content += getattr(self, 'content_%s' % language[0], None) or ''
72
73 def parse_media(self):
74 soup = BeautifulSoup(self.content or '', 'lxml')
75 images = soup.findAll('img')
76 results = []
77 for image in images:
78 url = image.get('src')
79 if url is None:
80 continue
81
82 mt = mimetypes.guess_type(url, strict=True)[0]
83 if mt is None:
84 mt = 'application/octet-stream'
85
86 results.append({
87 'url': url,
88 'title': image.get('title', ''),
89 'alt': image.get('alt', ''),
90 'mimetype': mt.split('/'),
91 })
92
93 return results
94
95 def get_add_url(self):
96 return reverse('admin:flatpages_flatpage_add')
97
98 def get_update_url(self):
99 return reverse('admin:flatpages_flatpage_change', args=[self.pk])
100
101 def get_delete_url(self):
102 return reverse('admin:flatpages_flatpage_delete', args=[self.pk])
103
104 @property
105 def rando_url(self):
106 return 'informations/{}/'.format(self.slug)
107
108 @property
109 def meta_description(self):
110 return plain_text(self.content)[:500]
111
[end of geotrek/flatpages/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geotrek/flatpages/models.py b/geotrek/flatpages/models.py
--- a/geotrek/flatpages/models.py
+++ b/geotrek/flatpages/models.py
@@ -108,3 +108,6 @@
@property
def meta_description(self):
return plain_text(self.content)[:500]
+
+ def is_public(self):
+ return self.any_published
diff --git a/mapentity/views/base.py b/mapentity/views/base.py
--- a/mapentity/views/base.py
+++ b/mapentity/views/base.py
@@ -39,7 +39,7 @@
original_path = re.sub(r'\.\d+x\d+_q\d+(_crop)?\.(jpg|png|jpeg)$', '', path, count=1, flags=re.IGNORECASE)
attachment = get_object_or_404(get_attachment_model(), attachment_file=original_path)
obj = attachment.content_object
- if not issubclass(obj._meta.model, mapentity_models.MapEntityMixin):
+ if not hasattr(obj._meta.model, 'attachments'):
raise Http404
if not obj.is_public():
if not request.user.is_authenticated:
|
{"golden_diff": "diff --git a/geotrek/flatpages/models.py b/geotrek/flatpages/models.py\n--- a/geotrek/flatpages/models.py\n+++ b/geotrek/flatpages/models.py\n@@ -108,3 +108,6 @@\n @property\n def meta_description(self):\n return plain_text(self.content)[:500]\n+\n+ def is_public(self):\n+ return self.any_published\ndiff --git a/mapentity/views/base.py b/mapentity/views/base.py\n--- a/mapentity/views/base.py\n+++ b/mapentity/views/base.py\n@@ -39,7 +39,7 @@\n original_path = re.sub(r'\\.\\d+x\\d+_q\\d+(_crop)?\\.(jpg|png|jpeg)$', '', path, count=1, flags=re.IGNORECASE)\n attachment = get_object_or_404(get_attachment_model(), attachment_file=original_path)\n obj = attachment.content_object\n- if not issubclass(obj._meta.model, mapentity_models.MapEntityMixin):\n+ if not hasattr(obj._meta.model, 'attachments'):\n raise Http404\n if not obj.is_public():\n if not request.user.is_authenticated:\n", "issue": "Static pages images URL ?\nJ'ai ajout\u00e9 une cover_image \u00e0 une page statique sur le serveur de DEMO sur cette page : https://geotrekdemo.ecrins-parcnational.fr/admin/flatpages/flatpage/3/change/ (admin / admin)\r\n\r\nL'URL de l'image indiqu\u00e9e (dans l'Adminsite et dans l'API V2) est https://geotrekdemo.ecrins-parcnational.fr/media/paperclip/flatpages_flatpage/3/biodivecrins.jpg, mais elle n'est pas accessible.\r\n\r\nL'image est bien sur mon serveur dans /opt/geotrek-admin/var/media/paperclip/flatpages_flatpage/3/\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom io import BytesIO\nimport json\nimport logging\nimport mimetypes\nimport os\nfrom datetime import datetime\nimport re\nfrom urllib.parse import quote\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.gis.db.models import GeometryField\nfrom django.core.exceptions import PermissionDenied\nfrom django.urls import reverse\nfrom django.http import (HttpResponse, HttpResponseBadRequest, Http404)\nfrom django.shortcuts import get_object_or_404\nfrom django.views import static\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_http_methods\nfrom django.views.generic.base import TemplateView\nfrom paperclip.settings import get_attachment_permission, get_attachment_model\n\nfrom mapentity import models as mapentity_models\nfrom .mixins import JSONResponseMixin, FilterListMixin, ModelViewMixin\nfrom ..decorators import view_permission_required\nfrom ..helpers import capture_image\nfrom ..settings import app_settings\n\nlogger = logging.getLogger(__name__)\n\n\ndef serve_attachment(request, path):\n \"\"\"\n Serve media/ for authorized users only, since it can contain sensitive\n information (uploaded documents)\n \"\"\"\n original_path = re.sub(r'\\.\\d+x\\d+_q\\d+(_crop)?\\.(jpg|png|jpeg)$', '', path, count=1, flags=re.IGNORECASE)\n attachment = get_object_or_404(get_attachment_model(), attachment_file=original_path)\n obj = attachment.content_object\n if not issubclass(obj._meta.model, mapentity_models.MapEntityMixin):\n raise Http404\n if not obj.is_public():\n if not request.user.is_authenticated:\n raise PermissionDenied\n if not request.user.has_perm(get_attachment_permission('read_attachment')):\n raise PermissionDenied\n if not request.user.has_perm('{}.read_{}'.format(obj._meta.app_label, obj._meta.model_name)):\n raise PermissionDenied\n\n content_type, encoding = mimetypes.guess_type(path)\n\n if settings.DEBUG:\n response = static.serve(request, path, 
settings.MEDIA_ROOT)\n else:\n response = HttpResponse()\n response[app_settings['SENDFILE_HTTP_HEADER']] = os.path.join(settings.MEDIA_URL_SECURE, path)\n response[\"Content-Type\"] = content_type or 'application/octet-stream'\n if encoding:\n response[\"Content-Encoding\"] = encoding\n if app_settings['SERVE_MEDIA_AS_ATTACHMENT']:\n response['Content-Disposition'] = \"attachment; filename={0}\".format(\n os.path.basename(path))\n return response\n\n\nclass JSSettings(JSONResponseMixin, TemplateView):\n \"\"\"\n Javascript settings, in JSON format.\n Likely to be overriden. Contains only necessary stuff\n for mapentity.\n \"\"\"\n\n def get_context_data(self):\n dictsettings = {}\n dictsettings['debug'] = settings.DEBUG\n dictsettings['map'] = dict(\n extent=getattr(settings, 'LEAFLET_CONFIG', {}).get('SPATIAL_EXTENT'),\n styles=app_settings['MAP_STYLES'],\n )\n\n # URLs\n root_url = app_settings['ROOT_URL']\n root_url = root_url if root_url.endswith('/') else '{}/'.format(root_url)\n dictsettings['urls'] = {}\n dictsettings['urls']['root'] = root_url\n\n from django.db import models\n from mapentity.registry import MapEntityOptions\n\n class ModelName(mapentity_models.MapEntityMixin, models.Model):\n pass\n\n options = MapEntityOptions(ModelName)\n\n dictsettings['urls']['static'] = settings.STATIC_URL\n dictsettings['urls']['layer'] = '{}{}'.format(root_url, options._url_path(mapentity_models.ENTITY_LAYER)[1:-1])\n dictsettings['urls']['detail'] = '{}modelname/0/'.format(root_url)\n dictsettings['urls']['format_list'] = '{}{}'.format(\n root_url, options._url_path(mapentity_models.ENTITY_FORMAT_LIST)[1:-1]\n )\n dictsettings['urls']['screenshot'] = reverse(\"mapentity:map_screenshot\")\n\n # Useful for JS calendars\n date_format = settings.DATE_INPUT_FORMATS[0].replace('%Y', 'yyyy').replace('%m', 'mm').replace('%d', 'dd')\n dictsettings['date_format'] = date_format\n # Languages\n dictsettings['languages'] = dict(available=dict(app_settings['TRANSLATED_LANGUAGES']),\n default=app_settings['LANGUAGE_CODE'])\n return dictsettings\n\n\nclass BaseListView(FilterListMixin, ModelViewMixin):\n columns = None\n\n def __init__(self, *args, **kwargs):\n super(BaseListView, self).__init__(*args, **kwargs)\n\n if self.columns is None:\n # All model fields except geometries\n self.columns = [field.name for field in self.get_model()._meta.fields\n if not isinstance(field, GeometryField)]\n # Id column should be the first one\n self.columns.remove('id')\n self.columns.insert(0, 'id')\n\n @view_permission_required()\n def dispatch(self, *args, **kwargs):\n return super(BaseListView, self).dispatch(*args, **kwargs)\n\n\n@csrf_exempt\n@login_required\ndef map_screenshot(request):\n \"\"\"\n This view allows to take screenshots, via a django-screamshot service, of\n the map **currently viewed by the user**.\n\n - A context full of information is built on client-side and posted here.\n - We reproduce this context, via headless browser, and take a capture\n - We return the resulting image as attachment.\n\n This seems overkill ? 
Please look around and find a better way.\n \"\"\"\n try:\n printcontext = request.POST['printcontext']\n assert len(printcontext) < 2048, \"Print context is way too big.\"\n\n # Prepare context, extract and add infos\n context = json.loads(printcontext)\n selector = context.pop('selector')\n map_url = context.pop('url')\n map_url = request.build_absolute_uri(map_url)\n context['print'] = True\n printcontext = json.dumps(context)\n contextencoded = quote(printcontext)\n map_url += '?context=%s' % contextencoded\n logger.debug(\"Capture %s\" % map_url)\n\n # Capture image and return it\n width = context.get('viewport', {}).get('width')\n height = context.get('viewport', {}).get('height')\n\n stream = BytesIO()\n capture_image(map_url, stream, width=width, height=height, selector=selector)\n response = HttpResponse(stream.getvalue(), content_type='image/png')\n response['Content-Disposition'] = 'attachment; filename=%s.png' % datetime.now().strftime('%Y%m%d-%H%M%S')\n return response\n\n except Exception as exc:\n logger.exception(exc)\n return HttpResponseBadRequest(exc)\n\n\n@require_http_methods([\"POST\"])\n@csrf_exempt\n@login_required\ndef history_delete(request, path=None):\n path = request.POST.get('path', path)\n if path:\n history = request.session.get('history')\n if history:\n history = [h for h in history if h['path'] != path]\n request.session['history'] = history\n return HttpResponse()\n", "path": "mapentity/views/base.py"}, {"content": "import mimetypes\n\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.template.defaultfilters import slugify\nfrom django.conf import settings\nfrom django.urls import reverse\n\nfrom bs4 import BeautifulSoup\nfrom extended_choices import Choices\n\nfrom mapentity.serializers import plain_text\nfrom geotrek.common.mixins import TimeStampedModelMixin, BasePublishableMixin\n\n\nFLATPAGES_TARGETS = Choices(\n ('ALL', 'all', _('All')),\n ('MOBILE', 'mobile', _('Mobile')),\n ('HIDDEN', 'hidden', _('Hidden')),\n ('WEB', 'web', _('Web')),\n)\n\n\nclass FlatPage(BasePublishableMixin, TimeStampedModelMixin):\n \"\"\"\n Manage *Geotrek-rando* static pages from Geotrek admin.\n\n Historically, we started static pages as static HTML files within\n *Geotrek-rando* folders.\n \"\"\"\n title = models.CharField(verbose_name=_('Title'), max_length=200)\n external_url = models.URLField(verbose_name=_('External URL'), blank=True, default='',\n help_text=_('Link to external website instead of HTML content'))\n content = models.TextField(verbose_name=_('Content'), null=True, blank=True,\n help_text=_('HTML content'))\n target = models.CharField(verbose_name=_('Target'), max_length=12, choices=FLATPAGES_TARGETS,\n default=FLATPAGES_TARGETS.ALL)\n source = models.ManyToManyField('common.RecordSource',\n blank=True, related_name='flatpages',\n verbose_name=_(\"Source\"))\n portal = models.ManyToManyField('common.TargetPortal',\n blank=True, related_name='flatpages',\n verbose_name=_(\"Portal\"))\n order = models.IntegerField(default=None, null=True, blank=True,\n help_text=_(\"ID order if blank\", ),\n verbose_name=_(\"Order\"))\n attachments = GenericRelation(settings.PAPERCLIP_ATTACHMENT_MODEL)\n\n @property\n def slug(self):\n return slugify(self.title)\n\n class Meta:\n verbose_name = _('Flat page')\n verbose_name_plural = _('Flat pages')\n ordering = ['order', 'id']\n permissions = (\n (\"read_flatpage\", \"Can read FlatPage\"),\n )\n\n def 
__str__(self):\n return self.title\n\n def get_permission_codename(self, *args):\n return\n\n def clean(self):\n html_content = ''\n for language in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:\n html_content += getattr(self, 'content_%s' % language[0], None) or ''\n\n def parse_media(self):\n soup = BeautifulSoup(self.content or '', 'lxml')\n images = soup.findAll('img')\n results = []\n for image in images:\n url = image.get('src')\n if url is None:\n continue\n\n mt = mimetypes.guess_type(url, strict=True)[0]\n if mt is None:\n mt = 'application/octet-stream'\n\n results.append({\n 'url': url,\n 'title': image.get('title', ''),\n 'alt': image.get('alt', ''),\n 'mimetype': mt.split('/'),\n })\n\n return results\n\n def get_add_url(self):\n return reverse('admin:flatpages_flatpage_add')\n\n def get_update_url(self):\n return reverse('admin:flatpages_flatpage_change', args=[self.pk])\n\n def get_delete_url(self):\n return reverse('admin:flatpages_flatpage_delete', args=[self.pk])\n\n @property\n def rando_url(self):\n return 'informations/{}/'.format(self.slug)\n\n @property\n def meta_description(self):\n return plain_text(self.content)[:500]\n", "path": "geotrek/flatpages/models.py"}]}
| 3,736 | 258 |
gh_patches_debug_9013
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1817
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
checkov silently returns [] when an `--external-checks-dir` doesn't have an init file
**Describe the bug**
When I use `--external-checks-dir` with a directory that has no `__init__.py`, all `checkov` does is output `[]` when run with `--output json`.
**Expected behavior**
Yell at me for giving a directory without any `__init__.py` file in it.
</issue>
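For context, the JSON branch in `RunnerRegistry.print_reports` (see `checkov/common/runners/runner_registry.py` below) only dumps `report_jsons`, so when every runner returns an empty report the output is a bare `[]`. A minimal sketch of how that branch can surface an explicit empty summary instead — `print_json_reports` is a hypothetical wrapper, and the behaviour mirrors the golden diff at the end of this row:

```python
import json

from checkov.common.output.report import Report

def print_json_reports(report_jsons, quiet=False):
    # Mirrors the fixed JSON branch of RunnerRegistry.print_reports.
    if not report_jsons:
        # Nothing was scanned/reported (e.g. the external checks dir was
        # silently ignored) -- emit an explicit summary instead of "[]".
        print(json.dumps(Report(None).get_summary(), indent=4))
    elif len(report_jsons) == 1:
        print(json.dumps(report_jsons[0], indent=4))
    else:
        print(json.dumps(report_jsons, indent=4))
```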
<code>
[start of checkov/common/runners/runner_registry.py]
1 import argparse
2 import itertools
3 import json
4 import logging
5 import os
6 from abc import abstractmethod
7 from typing import List, Union, Dict, Any, Tuple, Optional
8
9 from typing_extensions import Literal
10 import platform
11 import multiprocessing
12 from multiprocessing import Pipe
13
14 from cyclonedx.output import get_instance as get_cyclonedx_outputter
15
16 from checkov.common.bridgecrew.integration_features.integration_feature_registry import integration_feature_registry
17 from checkov.common.output.baseline import Baseline
18 from checkov.common.output.report import Report, report_to_cyclonedx
19 from checkov.common.runners.base_runner import BaseRunner
20 from checkov.common.util import data_structures_utils
21 from checkov.runner_filter import RunnerFilter
22 from checkov.terraform.context_parsers.registry import parser_registry
23 from checkov.terraform.runner import Runner as tf_runner
24 from checkov.terraform.parser import Parser
25
26
27 CHECK_BLOCK_TYPES = frozenset(["resource", "data", "provider", "module"])
28 OUTPUT_CHOICES = ["cli", "cyclonedx", "json", "junitxml", "github_failed_only", "sarif"]
29 OUTPUT_DELIMITER = "\n--- OUTPUT DELIMITER ---\n"
30
31
32 class RunnerRegistry:
33 runners: List[BaseRunner] = []
34 scan_reports: List[Report] = []
35 banner = ""
36
37 def __init__(self, banner: str, runner_filter: RunnerFilter, *runners: BaseRunner) -> None:
38 self.logger = logging.getLogger(__name__)
39 self.runner_filter = runner_filter
40 self.runners = list(runners)
41 self.banner = banner
42 self.scan_reports = []
43 self.filter_runner_framework()
44
45 @abstractmethod
46 def extract_entity_details(self, entity: Dict[str, Any]) -> Tuple[str, str, Dict[str, Any]]:
47 raise NotImplementedError()
48
49 def run(
50 self,
51 root_folder: Optional[str] = None,
52 external_checks_dir: Optional[List[str]] = None,
53 files: Optional[List[str]] = None,
54 guidelines: Optional[Dict[str, str]] = None,
55 collect_skip_comments: bool = True,
56 repo_root_for_plan_enrichment: Optional[List[Union[str, os.PathLike]]] = None,
57 ) -> List[Report]:
58 if platform.system() == 'Windows':
59 integration_feature_registry.run_pre_runner()
60 for runner in self.runners:
61 report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,
62 runner_filter=self.runner_filter, collect_skip_comments=collect_skip_comments)
63 self._handle_report(report, guidelines, repo_root_for_plan_enrichment)
64 return self.scan_reports
65
66 # use multiprocessing for unix os
67 logging.info("Running the runners using multiprocessing")
68 processes = []
69 integration_feature_registry.run_pre_runner()
70 for runner in self.runners:
71 parent_conn, child_conn = Pipe(duplex=False)
72 process = multiprocessing.get_context("fork").Process(target=RunnerRegistry._run_runner,
73 args=(runner, root_folder, external_checks_dir, files,
74 self.runner_filter, collect_skip_comments, child_conn))
75 processes.append((process, parent_conn))
76 process.start()
77
78 for process, parent_conn in processes:
79 scan_report = parent_conn.recv()
80 self._handle_report(scan_report, guidelines, repo_root_for_plan_enrichment)
81 return self.scan_reports
82
83 def _handle_report(self, scan_report, guidelines, repo_root_for_plan_enrichment):
84 integration_feature_registry.run_post_runner(scan_report)
85 if guidelines:
86 RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)
87 if repo_root_for_plan_enrichment:
88 enriched_resources = RunnerRegistry.get_enriched_resources(repo_root_for_plan_enrichment)
89 scan_report = Report("terraform_plan").enrich_plan_report(scan_report, enriched_resources)
90 scan_report = Report("terraform_plan").handle_skipped_checks(scan_report, enriched_resources)
91 self.scan_reports.append(scan_report)
92
93 @staticmethod
94 def _run_runner(runner, root_folder, external_checks_dir, files, runner_filter, collect_skip_comments,
95 child_conn):
96 report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,
97 runner_filter=runner_filter, collect_skip_comments=collect_skip_comments)
98 child_conn.send(report)
99 child_conn.close()
100
101 def print_reports(
102 self,
103 scan_reports: List[Report],
104 config: argparse.Namespace,
105 url: Optional[str] = None,
106 created_baseline_path: Optional[str] = None,
107 baseline: Optional[Baseline] = None,
108 ) -> Literal[0, 1]:
109 output_formats = set(config.output)
110
111 if "cli" in config.output and not config.quiet:
112 print(f"{self.banner}\n")
113 exit_codes = []
114 report_jsons = []
115 sarif_reports = []
116 junit_reports = []
117 cyclonedx_reports = []
118 for report in scan_reports:
119 if not report.is_empty():
120 if "json" in config.output:
121 report_jsons.append(report.get_dict(is_quiet=config.quiet, url=url))
122 if "junitxml" in config.output:
123 junit_reports.append(report)
124 # report.print_junit_xml()
125 if "github_failed_only" in config.output:
126 report.print_failed_github_md(use_bc_ids=config.output_bc_ids)
127 if "sarif" in config.output:
128 sarif_reports.append(report)
129 if "cli" in config.output:
130 report.print_console(
131 is_quiet=config.quiet,
132 is_compact=config.compact,
133 created_baseline_path=created_baseline_path,
134 baseline=baseline,
135 use_bc_ids=config.output_bc_ids,
136 )
137 if url:
138 print("More details: {}".format(url))
139 output_formats.discard("cli")
140 if output_formats:
141 print(OUTPUT_DELIMITER)
142 if "cyclonedx" in config.output:
143 cyclonedx_reports.append(report)
144 exit_codes.append(report.get_exit_code(config.soft_fail, config.soft_fail_on, config.hard_fail_on))
145
146 if "sarif" in config.output:
147 master_report = Report(None)
148 for report in sarif_reports:
149 master_report.failed_checks += report.failed_checks
150 master_report.print_sarif_report()
151 output_formats.remove("sarif")
152 if output_formats:
153 print(OUTPUT_DELIMITER)
154 if "json" in config.output:
155 if len(report_jsons) == 1:
156 print(json.dumps(report_jsons[0], indent=4))
157 else:
158 print(json.dumps(report_jsons, indent=4))
159 output_formats.remove("json")
160 if output_formats:
161 print(OUTPUT_DELIMITER)
162 if "junitxml" in config.output:
163 if len(junit_reports) == 1:
164 junit_reports[0].print_junit_xml(use_bc_ids=config.output_bc_ids)
165 else:
166 master_report = Report(None)
167 for report in junit_reports:
168 master_report.skipped_checks += report.skipped_checks
169 master_report.passed_checks += report.passed_checks
170 master_report.failed_checks += report.failed_checks
171 master_report.print_junit_xml(use_bc_ids=config.output_bc_ids)
172 output_formats.remove("junitxml")
173 if output_formats:
174 print(OUTPUT_DELIMITER)
175
176 if "cyclonedx" in config.output:
177 if cyclonedx_reports:
178 # More than one Report - combine Reports first
179 report = Report(None)
180 for r in cyclonedx_reports:
181 report.passed_checks += r.passed_checks
182 report.skipped_checks += r.skipped_checks
183 report.failed_checks += r.failed_checks
184 else:
185 report = cyclonedx_reports[0]
186 cyclonedx_output = get_cyclonedx_outputter(
187 bom=report.get_cyclonedx_bom()
188 )
189 print(cyclonedx_output.output_as_string())
190 output_formats.remove("cyclonedx")
191 if output_formats:
192 print(OUTPUT_DELIMITER)
193
194 exit_code = 1 if 1 in exit_codes else 0
195 return exit_code
196
197 def filter_runner_framework(self) -> None:
198 if not self.runner_filter:
199 return
200 if self.runner_filter.framework is None:
201 return
202 if self.runner_filter.framework == "all":
203 return
204 self.runners = [runner for runner in self.runners if runner.check_type in self.runner_filter.framework]
205
206 def remove_runner(self, runner: BaseRunner) -> None:
207 if runner in self.runners:
208 self.runners.remove(runner)
209
210 @staticmethod
211 def enrich_report_with_guidelines(scan_report: Report, guidelines: Dict[str, str]) -> None:
212 for record in itertools.chain(scan_report.failed_checks, scan_report.passed_checks, scan_report.skipped_checks):
213 if record.check_id in guidelines:
214 record.set_guideline(guidelines[record.check_id])
215
216 @staticmethod
217 def get_enriched_resources(repo_roots: List[Union[str, os.PathLike]]) -> Dict[str, Dict[str, Any]]:
218 repo_definitions = {}
219 for repo_root in repo_roots:
220 tf_definitions = {}
221 parsing_errors = {}
222 Parser().parse_directory(
223 directory=repo_root, # assume plan file is in the repo-root
224 out_definitions=tf_definitions,
225 out_parsing_errors=parsing_errors,
226 )
227 repo_definitions[repo_root] = { 'tf_definitions': tf_definitions, 'parsing_errors': parsing_errors }
228
229 enriched_resources = {}
230 for repo_root, parse_results in repo_definitions.items():
231 for full_file_path, definition in parse_results['tf_definitions'].items():
232 definitions_context = parser_registry.enrich_definitions_context((full_file_path, definition))
233 abs_scanned_file, _ = tf_runner._strip_module_referrer(full_file_path)
234 scanned_file = os.path.relpath(abs_scanned_file, repo_root)
235 for block_type, block_value in definition.items():
236 if block_type in CHECK_BLOCK_TYPES:
237 for entity in block_value:
238 context_parser = parser_registry.context_parsers[block_type]
239 definition_path = context_parser.get_entity_context_path(entity)
240 entity_id = ".".join(definition_path)
241 entity_context_path = [block_type] + definition_path
242 entity_context = data_structures_utils.get_inner_dict(
243 definitions_context[full_file_path], entity_context_path
244 )
245 entity_lines_range = [
246 entity_context.get("start_line"),
247 entity_context.get("end_line"),
248 ]
249 entity_code_lines = entity_context.get("code_lines")
250 skipped_checks = entity_context.get("skipped_checks")
251 enriched_resources[entity_id] = {
252 "entity_code_lines": entity_code_lines,
253 "entity_lines_range": entity_lines_range,
254 "scanned_file": scanned_file,
255 "skipped_checks": skipped_checks,
256 }
257 return enriched_resources
258
[end of checkov/common/runners/runner_registry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py
--- a/checkov/common/runners/runner_registry.py
+++ b/checkov/common/runners/runner_registry.py
@@ -152,7 +152,9 @@
if output_formats:
print(OUTPUT_DELIMITER)
if "json" in config.output:
- if len(report_jsons) == 1:
+ if not report_jsons:
+ print(json.dumps(Report(None).get_summary(), indent=4))
+ elif len(report_jsons) == 1:
print(json.dumps(report_jsons[0], indent=4))
else:
print(json.dumps(report_jsons, indent=4))
|
{"golden_diff": "diff --git a/checkov/common/runners/runner_registry.py b/checkov/common/runners/runner_registry.py\n--- a/checkov/common/runners/runner_registry.py\n+++ b/checkov/common/runners/runner_registry.py\n@@ -152,7 +152,9 @@\n if output_formats:\n print(OUTPUT_DELIMITER)\n if \"json\" in config.output:\n- if len(report_jsons) == 1:\n+ if not report_jsons:\n+ print(json.dumps(Report(None).get_summary(), indent=4))\n+ elif len(report_jsons) == 1:\n print(json.dumps(report_jsons[0], indent=4))\n else:\n print(json.dumps(report_jsons, indent=4))\n", "issue": "checkov silently returns [] when an `--external-checks-dir` doesn't have an init file\n**Describe the bug**\r\nWhen I use `--external-checks-dir` with a directory without an `__init__.py`, all `checkov` does is output `[]` when doing --output json.\r\n\r\n\r\n**Expected behavior**\r\nYell at me for giving a directory without any `__init__.py` file in it.\r\n\n", "before_files": [{"content": "import argparse\nimport itertools\nimport json\nimport logging\nimport os\nfrom abc import abstractmethod\nfrom typing import List, Union, Dict, Any, Tuple, Optional\n\nfrom typing_extensions import Literal\nimport platform\nimport multiprocessing\nfrom multiprocessing import Pipe\n\nfrom cyclonedx.output import get_instance as get_cyclonedx_outputter\n\nfrom checkov.common.bridgecrew.integration_features.integration_feature_registry import integration_feature_registry\nfrom checkov.common.output.baseline import Baseline\nfrom checkov.common.output.report import Report, report_to_cyclonedx\nfrom checkov.common.runners.base_runner import BaseRunner\nfrom checkov.common.util import data_structures_utils\nfrom checkov.runner_filter import RunnerFilter\nfrom checkov.terraform.context_parsers.registry import parser_registry\nfrom checkov.terraform.runner import Runner as tf_runner\nfrom checkov.terraform.parser import Parser\n\n\nCHECK_BLOCK_TYPES = frozenset([\"resource\", \"data\", \"provider\", \"module\"])\nOUTPUT_CHOICES = [\"cli\", \"cyclonedx\", \"json\", \"junitxml\", \"github_failed_only\", \"sarif\"]\nOUTPUT_DELIMITER = \"\\n--- OUTPUT DELIMITER ---\\n\"\n\n\nclass RunnerRegistry:\n runners: List[BaseRunner] = []\n scan_reports: List[Report] = []\n banner = \"\"\n\n def __init__(self, banner: str, runner_filter: RunnerFilter, *runners: BaseRunner) -> None:\n self.logger = logging.getLogger(__name__)\n self.runner_filter = runner_filter\n self.runners = list(runners)\n self.banner = banner\n self.scan_reports = []\n self.filter_runner_framework()\n\n @abstractmethod\n def extract_entity_details(self, entity: Dict[str, Any]) -> Tuple[str, str, Dict[str, Any]]:\n raise NotImplementedError()\n\n def run(\n self,\n root_folder: Optional[str] = None,\n external_checks_dir: Optional[List[str]] = None,\n files: Optional[List[str]] = None,\n guidelines: Optional[Dict[str, str]] = None,\n collect_skip_comments: bool = True,\n repo_root_for_plan_enrichment: Optional[List[Union[str, os.PathLike]]] = None,\n ) -> List[Report]:\n if platform.system() == 'Windows':\n integration_feature_registry.run_pre_runner()\n for runner in self.runners:\n report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\n runner_filter=self.runner_filter, collect_skip_comments=collect_skip_comments)\n self._handle_report(report, guidelines, repo_root_for_plan_enrichment)\n return self.scan_reports\n\n # use multiprocessing for unix os\n logging.info(\"Running the runners using multiprocessing\")\n processes = []\n 
integration_feature_registry.run_pre_runner()\n for runner in self.runners:\n parent_conn, child_conn = Pipe(duplex=False)\n process = multiprocessing.get_context(\"fork\").Process(target=RunnerRegistry._run_runner,\n args=(runner, root_folder, external_checks_dir, files,\n self.runner_filter, collect_skip_comments, child_conn))\n processes.append((process, parent_conn))\n process.start()\n\n for process, parent_conn in processes:\n scan_report = parent_conn.recv()\n self._handle_report(scan_report, guidelines, repo_root_for_plan_enrichment)\n return self.scan_reports\n\n def _handle_report(self, scan_report, guidelines, repo_root_for_plan_enrichment):\n integration_feature_registry.run_post_runner(scan_report)\n if guidelines:\n RunnerRegistry.enrich_report_with_guidelines(scan_report, guidelines)\n if repo_root_for_plan_enrichment:\n enriched_resources = RunnerRegistry.get_enriched_resources(repo_root_for_plan_enrichment)\n scan_report = Report(\"terraform_plan\").enrich_plan_report(scan_report, enriched_resources)\n scan_report = Report(\"terraform_plan\").handle_skipped_checks(scan_report, enriched_resources)\n self.scan_reports.append(scan_report)\n\n @staticmethod\n def _run_runner(runner, root_folder, external_checks_dir, files, runner_filter, collect_skip_comments,\n child_conn):\n report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files,\n runner_filter=runner_filter, collect_skip_comments=collect_skip_comments)\n child_conn.send(report)\n child_conn.close()\n\n def print_reports(\n self,\n scan_reports: List[Report],\n config: argparse.Namespace,\n url: Optional[str] = None,\n created_baseline_path: Optional[str] = None,\n baseline: Optional[Baseline] = None,\n ) -> Literal[0, 1]:\n output_formats = set(config.output)\n\n if \"cli\" in config.output and not config.quiet:\n print(f\"{self.banner}\\n\")\n exit_codes = []\n report_jsons = []\n sarif_reports = []\n junit_reports = []\n cyclonedx_reports = []\n for report in scan_reports:\n if not report.is_empty():\n if \"json\" in config.output:\n report_jsons.append(report.get_dict(is_quiet=config.quiet, url=url))\n if \"junitxml\" in config.output:\n junit_reports.append(report)\n # report.print_junit_xml()\n if \"github_failed_only\" in config.output:\n report.print_failed_github_md(use_bc_ids=config.output_bc_ids)\n if \"sarif\" in config.output:\n sarif_reports.append(report)\n if \"cli\" in config.output:\n report.print_console(\n is_quiet=config.quiet,\n is_compact=config.compact,\n created_baseline_path=created_baseline_path,\n baseline=baseline,\n use_bc_ids=config.output_bc_ids,\n )\n if url:\n print(\"More details: {}\".format(url))\n output_formats.discard(\"cli\")\n if output_formats:\n print(OUTPUT_DELIMITER)\n if \"cyclonedx\" in config.output:\n cyclonedx_reports.append(report)\n exit_codes.append(report.get_exit_code(config.soft_fail, config.soft_fail_on, config.hard_fail_on))\n\n if \"sarif\" in config.output:\n master_report = Report(None)\n for report in sarif_reports:\n master_report.failed_checks += report.failed_checks\n master_report.print_sarif_report()\n output_formats.remove(\"sarif\")\n if output_formats:\n print(OUTPUT_DELIMITER)\n if \"json\" in config.output:\n if len(report_jsons) == 1:\n print(json.dumps(report_jsons[0], indent=4))\n else:\n print(json.dumps(report_jsons, indent=4))\n output_formats.remove(\"json\")\n if output_formats:\n print(OUTPUT_DELIMITER)\n if \"junitxml\" in config.output:\n if len(junit_reports) == 1:\n 
junit_reports[0].print_junit_xml(use_bc_ids=config.output_bc_ids)\n else:\n master_report = Report(None)\n for report in junit_reports:\n master_report.skipped_checks += report.skipped_checks\n master_report.passed_checks += report.passed_checks\n master_report.failed_checks += report.failed_checks\n master_report.print_junit_xml(use_bc_ids=config.output_bc_ids)\n output_formats.remove(\"junitxml\")\n if output_formats:\n print(OUTPUT_DELIMITER)\n\n if \"cyclonedx\" in config.output:\n if cyclonedx_reports:\n # More than one Report - combine Reports first\n report = Report(None)\n for r in cyclonedx_reports:\n report.passed_checks += r.passed_checks\n report.skipped_checks += r.skipped_checks\n report.failed_checks += r.failed_checks\n else:\n report = cyclonedx_reports[0]\n cyclonedx_output = get_cyclonedx_outputter(\n bom=report.get_cyclonedx_bom()\n )\n print(cyclonedx_output.output_as_string())\n output_formats.remove(\"cyclonedx\")\n if output_formats:\n print(OUTPUT_DELIMITER)\n\n exit_code = 1 if 1 in exit_codes else 0\n return exit_code\n\n def filter_runner_framework(self) -> None:\n if not self.runner_filter:\n return\n if self.runner_filter.framework is None:\n return\n if self.runner_filter.framework == \"all\":\n return\n self.runners = [runner for runner in self.runners if runner.check_type in self.runner_filter.framework]\n\n def remove_runner(self, runner: BaseRunner) -> None:\n if runner in self.runners:\n self.runners.remove(runner)\n\n @staticmethod\n def enrich_report_with_guidelines(scan_report: Report, guidelines: Dict[str, str]) -> None:\n for record in itertools.chain(scan_report.failed_checks, scan_report.passed_checks, scan_report.skipped_checks):\n if record.check_id in guidelines:\n record.set_guideline(guidelines[record.check_id])\n\n @staticmethod\n def get_enriched_resources(repo_roots: List[Union[str, os.PathLike]]) -> Dict[str, Dict[str, Any]]:\n repo_definitions = {}\n for repo_root in repo_roots:\n tf_definitions = {}\n parsing_errors = {}\n Parser().parse_directory(\n directory=repo_root, # assume plan file is in the repo-root\n out_definitions=tf_definitions,\n out_parsing_errors=parsing_errors,\n )\n repo_definitions[repo_root] = { 'tf_definitions': tf_definitions, 'parsing_errors': parsing_errors }\n\n enriched_resources = {}\n for repo_root, parse_results in repo_definitions.items():\n for full_file_path, definition in parse_results['tf_definitions'].items():\n definitions_context = parser_registry.enrich_definitions_context((full_file_path, definition))\n abs_scanned_file, _ = tf_runner._strip_module_referrer(full_file_path)\n scanned_file = os.path.relpath(abs_scanned_file, repo_root)\n for block_type, block_value in definition.items():\n if block_type in CHECK_BLOCK_TYPES:\n for entity in block_value:\n context_parser = parser_registry.context_parsers[block_type]\n definition_path = context_parser.get_entity_context_path(entity)\n entity_id = \".\".join(definition_path)\n entity_context_path = [block_type] + definition_path\n entity_context = data_structures_utils.get_inner_dict(\n definitions_context[full_file_path], entity_context_path\n )\n entity_lines_range = [\n entity_context.get(\"start_line\"),\n entity_context.get(\"end_line\"),\n ]\n entity_code_lines = entity_context.get(\"code_lines\")\n skipped_checks = entity_context.get(\"skipped_checks\")\n enriched_resources[entity_id] = {\n \"entity_code_lines\": entity_code_lines,\n \"entity_lines_range\": entity_lines_range,\n \"scanned_file\": scanned_file,\n \"skipped_checks\": 
skipped_checks,\n }\n return enriched_resources\n", "path": "checkov/common/runners/runner_registry.py"}]}
| 3,578 | 160 |
gh_patches_debug_30630
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-1566
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DVF to DDF
**Is your feature request related to a problem? Please describe.**
The current repository only supports warping based on DDF, but not on DVF.
**Describe the solution you'd like**
Implement a DVF to DDF transformation layer. So for models outputting DVF, we can first transform DVF to DDF and then use the existing Warp layer to resample the input image based on the transformed DDF.
**Describe alternatives you've considered**
N/A
**Additional context**
DeepReg implementation of [IntDVF](https://github.com/DeepRegNet/DeepReg/blob/80854094feafec998fa6237199066556c73f31f9/deepreg/model/layer.py#L508-L535)
</issue>
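For context, DeepReg's `IntDVF` integrates a stationary velocity field by scaling and squaring. A minimal sketch of that idea using the `Warp` layer from the file below — `dvf_to_ddf` is a hypothetical helper, and the loop matches what the `DVF2DDF` module in the golden diff does:

```python
import torch

from monai.networks.blocks.warp import Warp

def dvf_to_ddf(dvf: torch.Tensor, warp_layer: Warp, num_steps: int = 7) -> torch.Tensor:
    # Scaling and squaring: scale the velocity field down by 2**num_steps,
    # then repeatedly compose the resulting small displacement with itself.
    ddf = dvf / (2 ** num_steps)
    for _ in range(num_steps):
        # Composing a displacement field with itself == warping it by itself and adding.
        ddf = ddf + warp_layer(image=ddf, ddf=ddf)
    return ddf
```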
<code>
[start of monai/networks/blocks/__init__.py]
1 # Copyright 2020 - 2021 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from .acti_norm import ADN
13 from .activation import Mish, Swish
14 from .aspp import SimpleASPP
15 from .convolutions import Convolution, ResidualUnit
16 from .downsample import MaxAvgPool
17 from .dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, UnetUpBlock, get_output_padding, get_padding
18 from .fcn import FCN, GCN, MCFCN, Refine
19 from .localnet_block import LocalNetDownSampleBlock, LocalNetFeatureExtractorBlock, LocalNetUpSampleBlock
20 from .segresnet_block import ResBlock
21 from .squeeze_and_excitation import (
22 ChannelSELayer,
23 ResidualSELayer,
24 SEBlock,
25 SEBottleneck,
26 SEResNetBottleneck,
27 SEResNeXtBottleneck,
28 )
29 from .upsample import SubpixelUpsample, Subpixelupsample, SubpixelUpSample, Upsample, UpSample
30 from .warp import Warp
31
[end of monai/networks/blocks/__init__.py]
[start of monai/networks/blocks/warp.py]
1 from typing import List, Optional, Union
2
3 import torch
4 from torch import nn
5 from torch.nn import functional as F
6
7 from monai.utils import GridSamplePadMode
8
9
10 class Warp(nn.Module):
11 """
12 Warp an image with given DDF.
13 """
14
15 def __init__(
16 self,
17 spatial_dims: int,
18 mode: int = 1,
19 padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS,
20 ):
21 """
22 Args:
23 spatial_dims: {2, 3}. number of spatial dimensions
24 mode: interpolation mode to calculate output values, defaults to 1.
25 Possible values are::
26
27 - 0 or 'nearest' or InterpolationType.nearest
28 - 1 or 'linear' or InterpolationType.linear
29 - 2 or 'quadratic' or InterpolationType.quadratic
30 - 3 or 'cubic' or InterpolationType.cubic
31 - 4 or 'fourth' or InterpolationType.fourth
32 - etc.
33 padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
34 Padding mode for outside grid values. Defaults to ``"border"``.
35 See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
36 """
37 super(Warp, self).__init__()
38 if spatial_dims not in [2, 3]:
39 raise ValueError(f"got unsupported spatial_dims={spatial_dims}, only support 2-d and 3-d input")
40 self.spatial_dims = spatial_dims
41 if mode < 0:
42 raise ValueError(f"do not support negative mode, got mode={mode}")
43 self.mode = mode
44 self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)
45
46 @staticmethod
47 def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor:
48 mesh_points = [torch.arange(0, dim) for dim in ddf.shape[2:]]
49 grid = torch.stack(torch.meshgrid(*mesh_points), dim=0) # (spatial_dims, ...)
50 grid = torch.stack([grid] * ddf.shape[0], dim=0) # (batch, spatial_dims, ...)
51 grid = grid.to(ddf)
52 return grid
53
54 @staticmethod
55 def normalize_grid(grid: torch.Tensor) -> torch.Tensor:
56 # (batch, ..., self.spatial_dims)
57 for i, dim in enumerate(grid.shape[1:-1]):
58 grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1
59 return grid
60
61 def forward(self, image: torch.Tensor, ddf: torch.Tensor) -> torch.Tensor:
62 """
63 Args:
64 image: Tensor in shape (batch, num_channels, H, W[, D])
65 ddf: Tensor in the same spatial size as image, in shape (batch, spatial_dims, H, W[, D])
66
67 Returns:
68 warped_image in the same shape as image (batch, num_channels, H, W[, D])
69 """
70 if len(image.shape) != 2 + self.spatial_dims:
71 raise ValueError(f"expecting {self.spatial_dims + 2}-d input, " f"got input in shape {image.shape}")
72 if len(ddf.shape) != 2 + self.spatial_dims or ddf.shape[1] != self.spatial_dims:
73 raise ValueError(
74 f"expecting {self.spatial_dims + 2}-d ddf with {self.spatial_dims} channels, "
75 f"got ddf in shape {ddf.shape}"
76 )
77 if image.shape[0] != ddf.shape[0] or image.shape[2:] != ddf.shape[2:]:
78 raise ValueError(
79 "expecting image and ddf of same batch size and spatial size, "
80 f"got image of shape {image.shape}, ddf of shape {ddf.shape}"
81 )
82
83 grid = self.get_reference_grid(ddf) + ddf
84 grid = grid.permute([0] + list(range(2, 2 + self.spatial_dims)) + [1]) # (batch, ..., self.spatial_dims)
85
86 if self.mode > 1:
87 raise ValueError(f"{self.mode}-order interpolation not yet implemented.")
88 # if not USE_COMPILED:
89 # raise ValueError(f"cannot perform {self.mode}-order interpolation without C compile.")
90 # _padding_mode = self.padding_mode.value
91 # if _padding_mode == "zeros":
92 # bound = 7
93 # elif _padding_mode == "border":
94 # bound = 0
95 # else:
96 # bound = 1
97 # warped_image: torch.Tensor = grid_pull(
98 # image,
99 # grid,
100 # bound=bound,
101 # extrapolate=True,
102 # interpolation=self.mode,
103 # )
104 else:
105 grid = self.normalize_grid(grid)
106 index_ordering: List[int] = list(range(self.spatial_dims - 1, -1, -1))
107 grid = grid[..., index_ordering] # z, y, x -> x, y, z
108 _interp_mode = "bilinear" if self.mode == 1 else "nearest"
109 warped_image = F.grid_sample(
110 image, grid, mode=_interp_mode, padding_mode=self.padding_mode.value, align_corners=True
111 )
112
113 return warped_image
114
[end of monai/networks/blocks/warp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/monai/networks/blocks/__init__.py b/monai/networks/blocks/__init__.py
--- a/monai/networks/blocks/__init__.py
+++ b/monai/networks/blocks/__init__.py
@@ -27,4 +27,4 @@
SEResNeXtBottleneck,
)
from .upsample import SubpixelUpsample, Subpixelupsample, SubpixelUpSample, Upsample, UpSample
-from .warp import Warp
+from .warp import DVF2DDF, Warp
diff --git a/monai/networks/blocks/warp.py b/monai/networks/blocks/warp.py
--- a/monai/networks/blocks/warp.py
+++ b/monai/networks/blocks/warp.py
@@ -62,7 +62,7 @@
"""
Args:
image: Tensor in shape (batch, num_channels, H, W[, D])
- ddf: Tensor in the same spatial size as image, in shape (batch, spatial_dims, H, W[, D])
+ ddf: Tensor in the same spatial size as image, in shape (batch, ``spatial_dims``, H, W[, D])
Returns:
warped_image in the same shape as image (batch, num_channels, H, W[, D])
@@ -111,3 +111,40 @@
)
return warped_image
+
+
+class DVF2DDF(nn.Module):
+ """
+ Layer calculates a dense velocity field (DVF) from a dense displacement field (DDF)
+ with scaling and squaring.
+
+ Adapted from:
+ DeepReg (https://github.com/DeepRegNet/DeepReg)
+
+ """
+
+ def __init__(
+ self,
+ spatial_dims: int,
+ num_steps: int = 7,
+ mode: int = 1,
+ padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS,
+ ):
+ super(DVF2DDF, self).__init__()
+ if num_steps <= 0:
+ raise ValueError(f"expecting positive num_steps, got {num_steps}")
+ self.num_steps = num_steps
+ self.warp_layer = Warp(spatial_dims=spatial_dims, mode=mode, padding_mode=padding_mode)
+
+ def forward(self, dvf):
+ """
+ Args:
+ dvf: dvf to be transformed, in shape (batch, ``spatial_dims``, H, W[,D])
+
+ Returns:
+
+ """
+ ddf: torch.Tensor = dvf / (2 ** self.num_steps)
+ for _ in range(self.num_steps):
+ ddf = ddf + self.warp_layer(image=ddf, ddf=ddf)
+ return ddf
|
{"golden_diff": "diff --git a/monai/networks/blocks/__init__.py b/monai/networks/blocks/__init__.py\n--- a/monai/networks/blocks/__init__.py\n+++ b/monai/networks/blocks/__init__.py\n@@ -27,4 +27,4 @@\n SEResNeXtBottleneck,\n )\n from .upsample import SubpixelUpsample, Subpixelupsample, SubpixelUpSample, Upsample, UpSample\n-from .warp import Warp\n+from .warp import DVF2DDF, Warp\ndiff --git a/monai/networks/blocks/warp.py b/monai/networks/blocks/warp.py\n--- a/monai/networks/blocks/warp.py\n+++ b/monai/networks/blocks/warp.py\n@@ -62,7 +62,7 @@\n \"\"\"\n Args:\n image: Tensor in shape (batch, num_channels, H, W[, D])\n- ddf: Tensor in the same spatial size as image, in shape (batch, spatial_dims, H, W[, D])\n+ ddf: Tensor in the same spatial size as image, in shape (batch, ``spatial_dims``, H, W[, D])\n \n Returns:\n warped_image in the same shape as image (batch, num_channels, H, W[, D])\n@@ -111,3 +111,40 @@\n )\n \n return warped_image\n+\n+\n+class DVF2DDF(nn.Module):\n+ \"\"\"\n+ Layer calculates a dense velocity field (DVF) from a dense displacement field (DDF)\n+ with scaling and squaring.\n+\n+ Adapted from:\n+ DeepReg (https://github.com/DeepRegNet/DeepReg)\n+\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ spatial_dims: int,\n+ num_steps: int = 7,\n+ mode: int = 1,\n+ padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS,\n+ ):\n+ super(DVF2DDF, self).__init__()\n+ if num_steps <= 0:\n+ raise ValueError(f\"expecting positive num_steps, got {num_steps}\")\n+ self.num_steps = num_steps\n+ self.warp_layer = Warp(spatial_dims=spatial_dims, mode=mode, padding_mode=padding_mode)\n+\n+ def forward(self, dvf):\n+ \"\"\"\n+ Args:\n+ dvf: dvf to be transformed, in shape (batch, ``spatial_dims``, H, W[,D])\n+\n+ Returns:\n+\n+ \"\"\"\n+ ddf: torch.Tensor = dvf / (2 ** self.num_steps)\n+ for _ in range(self.num_steps):\n+ ddf = ddf + self.warp_layer(image=ddf, ddf=ddf)\n+ return ddf\n", "issue": "DVF to DDF\n**Is your feature request related to a problem? Please describe.**\r\nThe current repository only supports warping based on DDF, but not on DVF.\r\n\r\n**Describe the solution you'd like**\r\nImplement a DVF to DDF transformation layer. 
So for models outputting DVF, we can first transform DVF to DDF and then used the existing Warp layer to resample input image based on the transformed DDF.\r\n\r\n**Describe alternatives you've considered**\r\nN/A\r\n\r\n**Additional context**\r\nDeepReg implementation of [IntDVF](https://github.com/DeepRegNet/DeepReg/blob/80854094feafec998fa6237199066556c73f31f9/deepreg/model/layer.py#L508-L535)\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .acti_norm import ADN\nfrom .activation import Mish, Swish\nfrom .aspp import SimpleASPP\nfrom .convolutions import Convolution, ResidualUnit\nfrom .downsample import MaxAvgPool\nfrom .dynunet_block import UnetBasicBlock, UnetOutBlock, UnetResBlock, UnetUpBlock, get_output_padding, get_padding\nfrom .fcn import FCN, GCN, MCFCN, Refine\nfrom .localnet_block import LocalNetDownSampleBlock, LocalNetFeatureExtractorBlock, LocalNetUpSampleBlock\nfrom .segresnet_block import ResBlock\nfrom .squeeze_and_excitation import (\n ChannelSELayer,\n ResidualSELayer,\n SEBlock,\n SEBottleneck,\n SEResNetBottleneck,\n SEResNeXtBottleneck,\n)\nfrom .upsample import SubpixelUpsample, Subpixelupsample, SubpixelUpSample, Upsample, UpSample\nfrom .warp import Warp\n", "path": "monai/networks/blocks/__init__.py"}, {"content": "from typing import List, Optional, Union\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom monai.utils import GridSamplePadMode\n\n\nclass Warp(nn.Module):\n \"\"\"\n Warp an image with given DDF.\n \"\"\"\n\n def __init__(\n self,\n spatial_dims: int,\n mode: int = 1,\n padding_mode: Optional[Union[GridSamplePadMode, str]] = GridSamplePadMode.ZEROS,\n ):\n \"\"\"\n Args:\n spatial_dims: {2, 3}. number of spatial dimensions\n mode: interpolation mode to calculate output values, defaults to 1.\n Possible values are::\n\n - 0 or 'nearest' or InterpolationType.nearest\n - 1 or 'linear' or InterpolationType.linear\n - 2 or 'quadratic' or InterpolationType.quadratic\n - 3 or 'cubic' or InterpolationType.cubic\n - 4 or 'fourth' or InterpolationType.fourth\n - etc.\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n Padding mode for outside grid values. 
Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n \"\"\"\n super(Warp, self).__init__()\n if spatial_dims not in [2, 3]:\n raise ValueError(f\"got unsupported spatial_dims={spatial_dims}, only support 2-d and 3-d input\")\n self.spatial_dims = spatial_dims\n if mode < 0:\n raise ValueError(f\"do not support negative mode, got mode={mode}\")\n self.mode = mode\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n\n @staticmethod\n def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor:\n mesh_points = [torch.arange(0, dim) for dim in ddf.shape[2:]]\n grid = torch.stack(torch.meshgrid(*mesh_points), dim=0) # (spatial_dims, ...)\n grid = torch.stack([grid] * ddf.shape[0], dim=0) # (batch, spatial_dims, ...)\n grid = grid.to(ddf)\n return grid\n\n @staticmethod\n def normalize_grid(grid: torch.Tensor) -> torch.Tensor:\n # (batch, ..., self.spatial_dims)\n for i, dim in enumerate(grid.shape[1:-1]):\n grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1\n return grid\n\n def forward(self, image: torch.Tensor, ddf: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n image: Tensor in shape (batch, num_channels, H, W[, D])\n ddf: Tensor in the same spatial size as image, in shape (batch, spatial_dims, H, W[, D])\n\n Returns:\n warped_image in the same shape as image (batch, num_channels, H, W[, D])\n \"\"\"\n if len(image.shape) != 2 + self.spatial_dims:\n raise ValueError(f\"expecting {self.spatial_dims + 2}-d input, \" f\"got input in shape {image.shape}\")\n if len(ddf.shape) != 2 + self.spatial_dims or ddf.shape[1] != self.spatial_dims:\n raise ValueError(\n f\"expecting {self.spatial_dims + 2}-d ddf with {self.spatial_dims} channels, \"\n f\"got ddf in shape {ddf.shape}\"\n )\n if image.shape[0] != ddf.shape[0] or image.shape[2:] != ddf.shape[2:]:\n raise ValueError(\n \"expecting image and ddf of same batch size and spatial size, \"\n f\"got image of shape {image.shape}, ddf of shape {ddf.shape}\"\n )\n\n grid = self.get_reference_grid(ddf) + ddf\n grid = grid.permute([0] + list(range(2, 2 + self.spatial_dims)) + [1]) # (batch, ..., self.spatial_dims)\n\n if self.mode > 1:\n raise ValueError(f\"{self.mode}-order interpolation not yet implemented.\")\n # if not USE_COMPILED:\n # raise ValueError(f\"cannot perform {self.mode}-order interpolation without C compile.\")\n # _padding_mode = self.padding_mode.value\n # if _padding_mode == \"zeros\":\n # bound = 7\n # elif _padding_mode == \"border\":\n # bound = 0\n # else:\n # bound = 1\n # warped_image: torch.Tensor = grid_pull(\n # image,\n # grid,\n # bound=bound,\n # extrapolate=True,\n # interpolation=self.mode,\n # )\n else:\n grid = self.normalize_grid(grid)\n index_ordering: List[int] = list(range(self.spatial_dims - 1, -1, -1))\n grid = grid[..., index_ordering] # z, y, x -> x, y, z\n _interp_mode = \"bilinear\" if self.mode == 1 else \"nearest\"\n warped_image = F.grid_sample(\n image, grid, mode=_interp_mode, padding_mode=self.padding_mode.value, align_corners=True\n )\n\n return warped_image\n", "path": "monai/networks/blocks/warp.py"}]}
| 2,557 | 638 |
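(Editorial aside, not part of the dataset record above.) The patch adds a `DVF2DDF` block that integrates a dense velocity field into a dense displacement field by scaling and squaring, which can then be passed to the existing `Warp` layer. A minimal usage sketch, assuming the classes behave exactly as shown in the diff and that a MONAI build containing this patch is installed; tensor shapes and values are illustrative only:

```python
import torch
from monai.networks.blocks import DVF2DDF, Warp  # DVF2DDF is available only after the patch

batch, channels, size = 2, 1, 32
image = torch.rand(batch, channels, size, size, size)  # (batch, num_channels, H, W, D)
dvf = torch.rand(batch, 3, size, size, size)           # (batch, spatial_dims, H, W, D)

dvf2ddf = DVF2DDF(spatial_dims=3, num_steps=7)  # scaling-and-squaring integration
warp = Warp(spatial_dims=3, mode=1)             # linear resampling via grid_sample

ddf = dvf2ddf(dvf)                    # integrate the velocity field into displacements
warped = warp(image=image, ddf=ddf)   # warped image keeps the input image's shape
print(warped.shape)                   # torch.Size([2, 1, 32, 32, 32])
```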
gh_patches_debug_5833 | rasdani/github-patches | git_diff | cupy__cupy-499 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Different behaviour of argmax
I found that the behaviour of `argmax` is different between `numpy` and `cupy` if the array has `0` in its shape and the `axis` argument is used. I don't know whether this behaviour is intended or not.
```
np.empty((0, 1)).argmax(axis=1) # array([], dtype=int64)
cupy.empty((0, 1)).argmax(axis=1)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-a5737d72bcba> in <module>()
----> 1 cupy.empty((0, 1)).argmax(axis=1)
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17701)()
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17556)()
cupy/core/reduction.pxi in cupy.core.core.simple_reduction_function.__call__ (cupy/core/core.cpp:52697)()
ValueError: zero-size array to reduction operation cupy_argmax which has no identity
```
I used cupy 2.0.0a1.
</issue>
<code>
[start of cupy/logic/truth.py]
1 def all(a, axis=None, out=None, keepdims=False):
2 # TODO(okuta): check type
3 return a.all(axis=axis, out=out, keepdims=keepdims)
4
5
6 def any(a, axis=None, out=None, keepdims=False):
7 # TODO(okuta): check type
8 return a.any(axis=axis, out=out, keepdims=keepdims)
9
[end of cupy/logic/truth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cupy/logic/truth.py b/cupy/logic/truth.py
--- a/cupy/logic/truth.py
+++ b/cupy/logic/truth.py
@@ -1,8 +1,11 @@
+import cupy
+
+
def all(a, axis=None, out=None, keepdims=False):
- # TODO(okuta): check type
+ assert isinstance(a, cupy.ndarray)
return a.all(axis=axis, out=out, keepdims=keepdims)
def any(a, axis=None, out=None, keepdims=False):
- # TODO(okuta): check type
+ assert isinstance(a, cupy.ndarray)
return a.any(axis=axis, out=out, keepdims=keepdims)
|
{"golden_diff": "diff --git a/cupy/logic/truth.py b/cupy/logic/truth.py\n--- a/cupy/logic/truth.py\n+++ b/cupy/logic/truth.py\n@@ -1,8 +1,11 @@\n+import cupy\n+\n+\n def all(a, axis=None, out=None, keepdims=False):\n- # TODO(okuta): check type\n+ assert isinstance(a, cupy.ndarray)\n return a.all(axis=axis, out=out, keepdims=keepdims)\n \n \n def any(a, axis=None, out=None, keepdims=False):\n- # TODO(okuta): check type\n+ assert isinstance(a, cupy.ndarray)\n return a.any(axis=axis, out=out, keepdims=keepdims)\n", "issue": "Different behaviour of argmax\nI found the behaviour of `argmax` is different between `numpy` and `cupy` if the array has `0` in its shape and `axis` argument is used. I don't know whether this behaviour is intended or not.\r\n\r\n```\r\nnp.empty((0, 1)).argmax(axis=1) # array([], dtype=int64)\r\ncupy.empty((0, 1)).argmax(axis=1)\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-9-a5737d72bcba> in <module>()\r\n----> 1 cupy.empty((0, 1)).argmax(axis=1)\r\n\r\ncupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17701)()\r\n\r\ncupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17556)()\r\n\r\ncupy/core/reduction.pxi in cupy.core.core.simple_reduction_function.__call__ (cupy/core/core.cpp:52697)()\r\n\r\nValueError: zero-size array to reduction operation cupy_argmax which has no identity\r\n```\r\n\r\nI used cupy 2.0.0a1.\n", "before_files": [{"content": "def all(a, axis=None, out=None, keepdims=False):\n # TODO(okuta): check type\n return a.all(axis=axis, out=out, keepdims=keepdims)\n\n\ndef any(a, axis=None, out=None, keepdims=False):\n # TODO(okuta): check type\n return a.any(axis=axis, out=out, keepdims=keepdims)\n", "path": "cupy/logic/truth.py"}]}
| 899 | 161 |
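(Editorial aside, not part of the record.) The golden diff above tightens input validation in `cupy.all`/`cupy.any` by asserting that the argument is a `cupy.ndarray`. A small sketch of the behavioural difference, assuming a CuPy build containing the patch and a working CUDA device:

```python
import numpy
import cupy

x = cupy.asarray([[True, False], [True, True]])
print(cupy.all(x, axis=1))  # [False  True], unchanged for valid CuPy input

# Previously a NumPy array was accepted and handled by numpy's own .all();
# with the patch the isinstance assertion fails fast instead.
try:
    cupy.all(numpy.array([True, False]))
except AssertionError:
    print("non-CuPy arrays are now rejected")
```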
gh_patches_debug_33801 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1258 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uninstrument Existing fastapi instances
Existing instances shouldn't continue to send any telemetry after the `uninstrument`.
_Originally posted by @srikanthccv in https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1199#discussion_r957094027_
The `uninstrument` method of `FastAPIInstrumentor` restores the original `fastapi.FastAPI` class, but all the existing objects of the earlier instrumented class do not get uninstrumented, which is not the expected behaviour.
</issue>
<code>
[start of instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Usage
17 -----
18
19 .. code-block:: python
20
21 import fastapi
22 from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
23
24 app = fastapi.FastAPI()
25
26 @app.get("/foobar")
27 async def foobar():
28 return {"message": "hello world"}
29
30 FastAPIInstrumentor.instrument_app(app)
31
32 Configuration
33 -------------
34
35 Exclude lists
36 *************
37 To exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_FASTAPI_EXCLUDED_URLS``
38 (or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude.
39
40 For example,
41
42 ::
43
44 export OTEL_PYTHON_FASTAPI_EXCLUDED_URLS="client/.*/info,healthcheck"
45
46 will exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.
47
48 You can also pass the comma delimited regexes to the ``instrument_app`` method directly:
49
50 .. code-block:: python
51
52 FastAPIInstrumentor.instrument_app(app, excluded_urls="client/.*/info,healthcheck")
53
54 Request/Response hooks
55 **********************
56
57 Utilize request/response hooks to execute custom logic to be performed before/after performing a request. The server request hook takes in a server span and ASGI
58 scope object for every incoming request. The client request hook is called with the internal span and an ASGI scope which is sent as a dictionary for when the method receive is called.
59 The client response hook is called with the internal span and an ASGI event which is sent as a dictionary for when the method send is called.
60
61 .. code-block:: python
62
63 def server_request_hook(span: Span, scope: dict):
64 if span and span.is_recording():
65 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
66
67 def client_request_hook(span: Span, scope: dict):
68 if span and span.is_recording():
69 span.set_attribute("custom_user_attribute_from_client_request_hook", "some-value")
70
71 def client_response_hook(span: Span, message: dict):
72 if span and span.is_recording():
73 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
74
75 FastAPIInstrumentor().instrument(server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)
76
77 Capture HTTP request and response headers
78 *****************************************
79 You can configure the agent to capture predefined HTTP headers as span attributes, according to the `semantic convention <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_.
80
81 Request headers
82 ***************
83 To capture predefined HTTP request headers as span attributes, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST``
84 to a comma-separated list of HTTP header names.
85
86 For example,
87
88 ::
89
90 export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST="content-type,custom_request_header"
91
92 will extract ``content-type`` and ``custom_request_header`` from request headers and add them as span attributes.
93
94 It is recommended that you should give the correct names of the headers to be captured in the environment variable.
95 Request header names in fastapi are case insensitive. So, giving header name as ``CUStom-Header`` in environment variable will be able capture header with name ``custom-header``.
96
97 The name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>`` being the normalized HTTP header name (lowercase, with - characters replaced by _ ).
98 The value of the attribute will be single item list containing all the header values.
99
100 Example of the added span attribute,
101 ``http.request.header.custom_request_header = ["<value1>,<value2>"]``
102
103 Response headers
104 ****************
105 To capture predefined HTTP response headers as span attributes, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE``
106 to a comma-separated list of HTTP header names.
107
108 For example,
109
110 ::
111
112 export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE="content-type,custom_response_header"
113
114 will extract ``content-type`` and ``custom_response_header`` from response headers and add them as span attributes.
115
116 It is recommended that you should give the correct names of the headers to be captured in the environment variable.
117 Response header names captured in fastapi are case insensitive. So, giving header name as ``CUStomHeader`` in environment variable will be able capture header with name ``customheader``.
118
119 The name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>`` being the normalized HTTP header name (lowercase, with - characters replaced by _ ).
120 The value of the attribute will be single item list containing all the header values.
121
122 Example of the added span attribute,
123 ``http.response.header.custom_response_header = ["<value1>,<value2>"]``
124
125 Note:
126 Environment variable names to capture http headers are still experimental, and thus are subject to change.
127
128 API
129 ---
130 """
131 import logging
132 import typing
133 from typing import Collection
134
135 import fastapi
136 from starlette.routing import Match
137
138 from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
139 from opentelemetry.instrumentation.asgi.package import _instruments
140 from opentelemetry.instrumentation.fastapi.version import __version__
141 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
142 from opentelemetry.metrics import get_meter
143 from opentelemetry.semconv.trace import SpanAttributes
144 from opentelemetry.trace import Span
145 from opentelemetry.util.http import get_excluded_urls, parse_excluded_urls
146
147 _excluded_urls_from_env = get_excluded_urls("FASTAPI")
148 _logger = logging.getLogger(__name__)
149
150 _ServerRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
151 _ClientRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
152 _ClientResponseHookT = typing.Optional[typing.Callable[[Span, dict], None]]
153
154
155 class FastAPIInstrumentor(BaseInstrumentor):
156 """An instrumentor for FastAPI
157
158 See `BaseInstrumentor`
159 """
160
161 _original_fastapi = None
162
163 @staticmethod
164 def instrument_app(
165 app: fastapi.FastAPI,
166 server_request_hook: _ServerRequestHookT = None,
167 client_request_hook: _ClientRequestHookT = None,
168 client_response_hook: _ClientResponseHookT = None,
169 tracer_provider=None,
170 meter_provider=None,
171 excluded_urls=None,
172 ):
173 """Instrument an uninstrumented FastAPI application."""
174 if not hasattr(app, "_is_instrumented_by_opentelemetry"):
175 app._is_instrumented_by_opentelemetry = False
176
177 if not getattr(app, "_is_instrumented_by_opentelemetry", False):
178 if excluded_urls is None:
179 excluded_urls = _excluded_urls_from_env
180 else:
181 excluded_urls = parse_excluded_urls(excluded_urls)
182 meter = get_meter(__name__, __version__, meter_provider)
183
184 app.add_middleware(
185 OpenTelemetryMiddleware,
186 excluded_urls=excluded_urls,
187 default_span_details=_get_route_details,
188 server_request_hook=server_request_hook,
189 client_request_hook=client_request_hook,
190 client_response_hook=client_response_hook,
191 tracer_provider=tracer_provider,
192 meter=meter,
193 )
194 app._is_instrumented_by_opentelemetry = True
195 else:
196 _logger.warning(
197 "Attempting to instrument FastAPI app while already instrumented"
198 )
199
200 @staticmethod
201 def uninstrument_app(app: fastapi.FastAPI):
202 app.user_middleware = [
203 x
204 for x in app.user_middleware
205 if x.cls is not OpenTelemetryMiddleware
206 ]
207 app.middleware_stack = app.build_middleware_stack()
208 app._is_instrumented_by_opentelemetry = False
209
210 def instrumentation_dependencies(self) -> Collection[str]:
211 return _instruments
212
213 def _instrument(self, **kwargs):
214 self._original_fastapi = fastapi.FastAPI
215 _InstrumentedFastAPI._tracer_provider = kwargs.get("tracer_provider")
216 _InstrumentedFastAPI._server_request_hook = kwargs.get(
217 "server_request_hook"
218 )
219 _InstrumentedFastAPI._client_request_hook = kwargs.get(
220 "client_request_hook"
221 )
222 _InstrumentedFastAPI._client_response_hook = kwargs.get(
223 "client_response_hook"
224 )
225 _excluded_urls = kwargs.get("excluded_urls")
226 _InstrumentedFastAPI._excluded_urls = (
227 _excluded_urls_from_env
228 if _excluded_urls is None
229 else parse_excluded_urls(_excluded_urls)
230 )
231 _InstrumentedFastAPI._meter_provider = kwargs.get("meter_provider")
232 fastapi.FastAPI = _InstrumentedFastAPI
233
234 def _uninstrument(self, **kwargs):
235 fastapi.FastAPI = self._original_fastapi
236
237
238 class _InstrumentedFastAPI(fastapi.FastAPI):
239 _tracer_provider = None
240 _meter_provider = None
241 _excluded_urls = None
242 _server_request_hook: _ServerRequestHookT = None
243 _client_request_hook: _ClientRequestHookT = None
244 _client_response_hook: _ClientResponseHookT = None
245
246 def __init__(self, *args, **kwargs):
247 super().__init__(*args, **kwargs)
248 meter = get_meter(
249 __name__, __version__, _InstrumentedFastAPI._meter_provider
250 )
251 self.add_middleware(
252 OpenTelemetryMiddleware,
253 excluded_urls=_InstrumentedFastAPI._excluded_urls,
254 default_span_details=_get_route_details,
255 server_request_hook=_InstrumentedFastAPI._server_request_hook,
256 client_request_hook=_InstrumentedFastAPI._client_request_hook,
257 client_response_hook=_InstrumentedFastAPI._client_response_hook,
258 tracer_provider=_InstrumentedFastAPI._tracer_provider,
259 meter=meter,
260 )
261 self._is_instrumented_by_opentelemetry = True
262
263
264 def _get_route_details(scope):
265 """Callback to retrieve the fastapi route being served.
266
267 TODO: there is currently no way to retrieve http.route from
268 a starlette application from scope.
269
270 See: https://github.com/encode/starlette/pull/804
271 """
272 app = scope["app"]
273 route = None
274 for starlette_route in app.routes:
275 match, _ = starlette_route.matches(scope)
276 if match == Match.FULL:
277 route = starlette_route.path
278 break
279 if match == Match.PARTIAL:
280 route = starlette_route.path
281 # method only exists for http, if websocket
282 # leave it blank.
283 span_name = route or scope.get("method", "")
284 attributes = {}
285 if route:
286 attributes[SpanAttributes.HTTP_ROUTE] = route
287 return span_name, attributes
288
[end of instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py b/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py
@@ -192,6 +192,8 @@
meter=meter,
)
app._is_instrumented_by_opentelemetry = True
+ if app not in _InstrumentedFastAPI._instrumented_fastapi_apps:
+ _InstrumentedFastAPI._instrumented_fastapi_apps.add(app)
else:
_logger.warning(
"Attempting to instrument FastAPI app while already instrumented"
@@ -232,6 +234,9 @@
fastapi.FastAPI = _InstrumentedFastAPI
def _uninstrument(self, **kwargs):
+ for instance in _InstrumentedFastAPI._instrumented_fastapi_apps:
+ self.uninstrument_app(instance)
+ _InstrumentedFastAPI._instrumented_fastapi_apps.clear()
fastapi.FastAPI = self._original_fastapi
@@ -242,6 +247,7 @@
_server_request_hook: _ServerRequestHookT = None
_client_request_hook: _ClientRequestHookT = None
_client_response_hook: _ClientResponseHookT = None
+ _instrumented_fastapi_apps = set()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -259,6 +265,11 @@
meter=meter,
)
self._is_instrumented_by_opentelemetry = True
+ _InstrumentedFastAPI._instrumented_fastapi_apps.add(self)
+
+ def __del__(self):
+ if self in _InstrumentedFastAPI._instrumented_fastapi_apps:
+ _InstrumentedFastAPI._instrumented_fastapi_apps.remove(self)
def _get_route_details(scope):
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py b/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py\n@@ -192,6 +192,8 @@\n meter=meter,\n )\n app._is_instrumented_by_opentelemetry = True\n+ if app not in _InstrumentedFastAPI._instrumented_fastapi_apps:\n+ _InstrumentedFastAPI._instrumented_fastapi_apps.add(app)\n else:\n _logger.warning(\n \"Attempting to instrument FastAPI app while already instrumented\"\n@@ -232,6 +234,9 @@\n fastapi.FastAPI = _InstrumentedFastAPI\n \n def _uninstrument(self, **kwargs):\n+ for instance in _InstrumentedFastAPI._instrumented_fastapi_apps:\n+ self.uninstrument_app(instance)\n+ _InstrumentedFastAPI._instrumented_fastapi_apps.clear()\n fastapi.FastAPI = self._original_fastapi\n \n \n@@ -242,6 +247,7 @@\n _server_request_hook: _ServerRequestHookT = None\n _client_request_hook: _ClientRequestHookT = None\n _client_response_hook: _ClientResponseHookT = None\n+ _instrumented_fastapi_apps = set()\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n@@ -259,6 +265,11 @@\n meter=meter,\n )\n self._is_instrumented_by_opentelemetry = True\n+ _InstrumentedFastAPI._instrumented_fastapi_apps.add(self)\n+\n+ def __del__(self):\n+ if self in _InstrumentedFastAPI._instrumented_fastapi_apps:\n+ _InstrumentedFastAPI._instrumented_fastapi_apps.remove(self)\n \n \n def _get_route_details(scope):\n", "issue": "Uninstrument Existing fastapi instances\nExisting instances shouldn't continue to send any telemetry after the `uninstrument`.\r\n\r\n_Originally posted by @srikanthccv in https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1199#discussion_r957094027_\r\n\r\n`uninstrument` method of `FastAPIInstrumentor` restore the original `fastapi.FastAPI` class, but all the existing objects of the earlier instrumented class do not get uninstrumented, which is not the expected behaviour.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsage\n-----\n\n.. 
code-block:: python\n\n import fastapi\n from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor\n\n app = fastapi.FastAPI()\n\n @app.get(\"/foobar\")\n async def foobar():\n return {\"message\": \"hello world\"}\n\n FastAPIInstrumentor.instrument_app(app)\n\nConfiguration\n-------------\n\nExclude lists\n*************\nTo exclude certain URLs from being tracked, set the environment variable ``OTEL_PYTHON_FASTAPI_EXCLUDED_URLS``\n(or ``OTEL_PYTHON_EXCLUDED_URLS`` as fallback) with comma delimited regexes representing which URLs to exclude.\n\nFor example,\n\n::\n\n export OTEL_PYTHON_FASTAPI_EXCLUDED_URLS=\"client/.*/info,healthcheck\"\n\nwill exclude requests such as ``https://site/client/123/info`` and ``https://site/xyz/healthcheck``.\n\nYou can also pass the comma delimited regexes to the ``instrument_app`` method directly:\n\n.. code-block:: python\n\n FastAPIInstrumentor.instrument_app(app, excluded_urls=\"client/.*/info,healthcheck\")\n\nRequest/Response hooks\n**********************\n\nUtilize request/response hooks to execute custom logic to be performed before/after performing a request. The server request hook takes in a server span and ASGI\nscope object for every incoming request. The client request hook is called with the internal span and an ASGI scope which is sent as a dictionary for when the method receive is called.\nThe client response hook is called with the internal span and an ASGI event which is sent as a dictionary for when the method send is called.\n\n.. code-block:: python\n\n def server_request_hook(span: Span, scope: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def client_request_hook(span: Span, scope: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_client_request_hook\", \"some-value\")\n\n def client_response_hook(span: Span, message: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n FastAPIInstrumentor().instrument(server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)\n\nCapture HTTP request and response headers\n*****************************************\nYou can configure the agent to capture predefined HTTP headers as span attributes, according to the `semantic convention <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#http-request-and-response-headers>`_.\n\nRequest headers\n***************\nTo capture predefined HTTP request headers as span attributes, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST``\nto a comma-separated list of HTTP header names.\n\nFor example,\n\n::\n\n export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST=\"content-type,custom_request_header\"\n\nwill extract ``content-type`` and ``custom_request_header`` from request headers and add them as span attributes.\n\nIt is recommended that you should give the correct names of the headers to be captured in the environment variable.\nRequest header names in fastapi are case insensitive. 
So, giving header name as ``CUStom-Header`` in environment variable will be able capture header with name ``custom-header``.\n\nThe name of the added span attribute will follow the format ``http.request.header.<header_name>`` where ``<header_name>`` being the normalized HTTP header name (lowercase, with - characters replaced by _ ).\nThe value of the attribute will be single item list containing all the header values.\n\nExample of the added span attribute,\n``http.request.header.custom_request_header = [\"<value1>,<value2>\"]``\n\nResponse headers\n****************\nTo capture predefined HTTP response headers as span attributes, set the environment variable ``OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE``\nto a comma-separated list of HTTP header names.\n\nFor example,\n\n::\n\n export OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE=\"content-type,custom_response_header\"\n\nwill extract ``content-type`` and ``custom_response_header`` from response headers and add them as span attributes.\n\nIt is recommended that you should give the correct names of the headers to be captured in the environment variable.\nResponse header names captured in fastapi are case insensitive. So, giving header name as ``CUStomHeader`` in environment variable will be able capture header with name ``customheader``.\n\nThe name of the added span attribute will follow the format ``http.response.header.<header_name>`` where ``<header_name>`` being the normalized HTTP header name (lowercase, with - characters replaced by _ ).\nThe value of the attribute will be single item list containing all the header values.\n\nExample of the added span attribute,\n``http.response.header.custom_response_header = [\"<value1>,<value2>\"]``\n\nNote:\n Environment variable names to capture http headers are still experimental, and thus are subject to change.\n\nAPI\n---\n\"\"\"\nimport logging\nimport typing\nfrom typing import Collection\n\nimport fastapi\nfrom starlette.routing import Match\n\nfrom opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware\nfrom opentelemetry.instrumentation.asgi.package import _instruments\nfrom opentelemetry.instrumentation.fastapi.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.metrics import get_meter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import Span\nfrom opentelemetry.util.http import get_excluded_urls, parse_excluded_urls\n\n_excluded_urls_from_env = get_excluded_urls(\"FASTAPI\")\n_logger = logging.getLogger(__name__)\n\n_ServerRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n_ClientRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n_ClientResponseHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n\n\nclass FastAPIInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for FastAPI\n\n See `BaseInstrumentor`\n \"\"\"\n\n _original_fastapi = None\n\n @staticmethod\n def instrument_app(\n app: fastapi.FastAPI,\n server_request_hook: _ServerRequestHookT = None,\n client_request_hook: _ClientRequestHookT = None,\n client_response_hook: _ClientResponseHookT = None,\n tracer_provider=None,\n meter_provider=None,\n excluded_urls=None,\n ):\n \"\"\"Instrument an uninstrumented FastAPI application.\"\"\"\n if not hasattr(app, \"_is_instrumented_by_opentelemetry\"):\n app._is_instrumented_by_opentelemetry = False\n\n if not getattr(app, \"_is_instrumented_by_opentelemetry\", False):\n if excluded_urls is None:\n 
excluded_urls = _excluded_urls_from_env\n else:\n excluded_urls = parse_excluded_urls(excluded_urls)\n meter = get_meter(__name__, __version__, meter_provider)\n\n app.add_middleware(\n OpenTelemetryMiddleware,\n excluded_urls=excluded_urls,\n default_span_details=_get_route_details,\n server_request_hook=server_request_hook,\n client_request_hook=client_request_hook,\n client_response_hook=client_response_hook,\n tracer_provider=tracer_provider,\n meter=meter,\n )\n app._is_instrumented_by_opentelemetry = True\n else:\n _logger.warning(\n \"Attempting to instrument FastAPI app while already instrumented\"\n )\n\n @staticmethod\n def uninstrument_app(app: fastapi.FastAPI):\n app.user_middleware = [\n x\n for x in app.user_middleware\n if x.cls is not OpenTelemetryMiddleware\n ]\n app.middleware_stack = app.build_middleware_stack()\n app._is_instrumented_by_opentelemetry = False\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n self._original_fastapi = fastapi.FastAPI\n _InstrumentedFastAPI._tracer_provider = kwargs.get(\"tracer_provider\")\n _InstrumentedFastAPI._server_request_hook = kwargs.get(\n \"server_request_hook\"\n )\n _InstrumentedFastAPI._client_request_hook = kwargs.get(\n \"client_request_hook\"\n )\n _InstrumentedFastAPI._client_response_hook = kwargs.get(\n \"client_response_hook\"\n )\n _excluded_urls = kwargs.get(\"excluded_urls\")\n _InstrumentedFastAPI._excluded_urls = (\n _excluded_urls_from_env\n if _excluded_urls is None\n else parse_excluded_urls(_excluded_urls)\n )\n _InstrumentedFastAPI._meter_provider = kwargs.get(\"meter_provider\")\n fastapi.FastAPI = _InstrumentedFastAPI\n\n def _uninstrument(self, **kwargs):\n fastapi.FastAPI = self._original_fastapi\n\n\nclass _InstrumentedFastAPI(fastapi.FastAPI):\n _tracer_provider = None\n _meter_provider = None\n _excluded_urls = None\n _server_request_hook: _ServerRequestHookT = None\n _client_request_hook: _ClientRequestHookT = None\n _client_response_hook: _ClientResponseHookT = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n meter = get_meter(\n __name__, __version__, _InstrumentedFastAPI._meter_provider\n )\n self.add_middleware(\n OpenTelemetryMiddleware,\n excluded_urls=_InstrumentedFastAPI._excluded_urls,\n default_span_details=_get_route_details,\n server_request_hook=_InstrumentedFastAPI._server_request_hook,\n client_request_hook=_InstrumentedFastAPI._client_request_hook,\n client_response_hook=_InstrumentedFastAPI._client_response_hook,\n tracer_provider=_InstrumentedFastAPI._tracer_provider,\n meter=meter,\n )\n self._is_instrumented_by_opentelemetry = True\n\n\ndef _get_route_details(scope):\n \"\"\"Callback to retrieve the fastapi route being served.\n\n TODO: there is currently no way to retrieve http.route from\n a starlette application from scope.\n\n See: https://github.com/encode/starlette/pull/804\n \"\"\"\n app = scope[\"app\"]\n route = None\n for starlette_route in app.routes:\n match, _ = starlette_route.matches(scope)\n if match == Match.FULL:\n route = starlette_route.path\n break\n if match == Match.PARTIAL:\n route = starlette_route.path\n # method only exists for http, if websocket\n # leave it blank.\n span_name = route or scope.get(\"method\", \"\")\n attributes = {}\n if route:\n attributes[SpanAttributes.HTTP_ROUTE] = route\n return span_name, attributes\n", "path": "instrumentation/opentelemetry-instrumentation-fastapi/src/opentelemetry/instrumentation/fastapi/__init__.py"}]}
| 3,913 | 482 |
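(Editorial aside, not part of the record.) With the tracked-instances set introduced by the patch, a global `uninstrument()` also cleans up apps that were created while instrumentation was active. A rough lifecycle sketch, assuming the patched instrumentation package is installed; the class and attribute names follow the diff, the rest is illustrative:

```python
import fastapi
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor

instrumentor = FastAPIInstrumentor()
instrumentor.instrument()   # replaces fastapi.FastAPI with the instrumented subclass

app = fastapi.FastAPI()     # created while instrumented, so the OTel middleware is attached
assert app._is_instrumented_by_opentelemetry

instrumentor.uninstrument()  # with the patch, existing instances are uninstrumented as well
assert not app._is_instrumented_by_opentelemetry
```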
gh_patches_debug_42424 | rasdani/github-patches | git_diff | python-discord__bot-1452 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Delete messages when `!pypi` module is invalid
When users use the `!pypi` command with an invalid module, you currently get a response saying `"Package could not be found."`.
What I'm proposing is that after a delay (I believe we have `bot.constants.RedirectOutput.delete_delay` for this) the bot should delete both its response and the invocation message, like [`!d` currently does](https://github.com/python-discord/bot/blob/master/bot/exts/info/doc.py#L363-L365).
This would help reduce channel flooding when someone is trying to remember the name of a module, and since it's not a trivial change I'm happy to try and implement this myself if it's something we decide we want.
</issue>
<code>
[start of bot/exts/info/pypi.py]
1 import itertools
2 import logging
3 import random
4 import re
5
6 from discord import Embed
7 from discord.ext.commands import Cog, Context, command
8 from discord.utils import escape_markdown
9
10 from bot.bot import Bot
11 from bot.constants import Colours, NEGATIVE_REPLIES
12
13 URL = "https://pypi.org/pypi/{package}/json"
14 FIELDS = ("author", "requires_python", "summary", "license")
15 PYPI_ICON = "https://cdn.discordapp.com/emojis/766274397257334814.png"
16
17 PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))
18
19 ILLEGAL_CHARACTERS = re.compile(r"[^a-zA-Z0-9-.]+")
20
21 log = logging.getLogger(__name__)
22
23
24 class PyPi(Cog):
25 """Cog for getting information about PyPi packages."""
26
27 def __init__(self, bot: Bot):
28 self.bot = bot
29
30 @command(name="pypi", aliases=("package", "pack"))
31 async def get_package_info(self, ctx: Context, package: str) -> None:
32 """Provide information about a specific package from PyPI."""
33 embed = Embed(
34 title=random.choice(NEGATIVE_REPLIES),
35 colour=Colours.soft_red
36 )
37 embed.set_thumbnail(url=PYPI_ICON)
38
39 if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:
40 embed.description = f"Illegal character passed into command: '{escape_markdown(character.group(0))}'"
41 await ctx.send(embed=embed)
42 return
43
44 async with self.bot.http_session.get(URL.format(package=package)) as response:
45 if response.status == 404:
46 embed.description = "Package could not be found."
47
48 elif response.status == 200 and response.content_type == "application/json":
49 response_json = await response.json()
50 info = response_json["info"]
51
52 embed.title = f"{info['name']} v{info['version']}"
53 embed.url = info['package_url']
54 embed.colour = next(PYPI_COLOURS)
55
56 for field in FIELDS:
57 field_data = info[field]
58
59 # Field could be completely empty, in some cases can be a string with whitespaces, or None.
60 if field_data and not field_data.isspace():
61 if '\n' in field_data and field == "license":
62 field_data = field_data.split('\n')[0]
63
64 embed.add_field(
65 name=field.replace("_", " ").title(),
66 value=escape_markdown(field_data),
67 inline=False,
68 )
69
70 else:
71 embed.description = "There was an error when fetching your PyPi package."
72 log.trace(f"Error when fetching PyPi package: {response.status}.")
73
74 await ctx.send(embed=embed)
75
76
77 def setup(bot: Bot) -> None:
78 """Load the PyPi cog."""
79 bot.add_cog(PyPi(bot))
80
[end of bot/exts/info/pypi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bot/exts/info/pypi.py b/bot/exts/info/pypi.py
--- a/bot/exts/info/pypi.py
+++ b/bot/exts/info/pypi.py
@@ -8,7 +8,7 @@
from discord.utils import escape_markdown
from bot.bot import Bot
-from bot.constants import Colours, NEGATIVE_REPLIES
+from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput
URL = "https://pypi.org/pypi/{package}/json"
FIELDS = ("author", "requires_python", "summary", "license")
@@ -17,6 +17,7 @@
PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))
ILLEGAL_CHARACTERS = re.compile(r"[^a-zA-Z0-9-.]+")
+INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay
log = logging.getLogger(__name__)
@@ -36,42 +37,49 @@
)
embed.set_thumbnail(url=PYPI_ICON)
+ error = True
+
if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:
embed.description = f"Illegal character passed into command: '{escape_markdown(character.group(0))}'"
- await ctx.send(embed=embed)
- return
- async with self.bot.http_session.get(URL.format(package=package)) as response:
- if response.status == 404:
- embed.description = "Package could not be found."
+ else:
+ async with self.bot.http_session.get(URL.format(package=package)) as response:
+ if response.status == 404:
+ embed.description = "Package could not be found."
- elif response.status == 200 and response.content_type == "application/json":
- response_json = await response.json()
- info = response_json["info"]
+ elif response.status == 200 and response.content_type == "application/json":
+ response_json = await response.json()
+ info = response_json["info"]
- embed.title = f"{info['name']} v{info['version']}"
- embed.url = info['package_url']
- embed.colour = next(PYPI_COLOURS)
+ embed.title = f"{info['name']} v{info['version']}"
+ embed.url = info['package_url']
+ embed.colour = next(PYPI_COLOURS)
- for field in FIELDS:
- field_data = info[field]
+ for field in FIELDS:
+ field_data = info[field]
- # Field could be completely empty, in some cases can be a string with whitespaces, or None.
- if field_data and not field_data.isspace():
- if '\n' in field_data and field == "license":
- field_data = field_data.split('\n')[0]
+ # Field could be completely empty, in some cases can be a string with whitespaces, or None.
+ if field_data and not field_data.isspace():
+ if '\n' in field_data and field == "license":
+ field_data = field_data.split('\n')[0]
- embed.add_field(
- name=field.replace("_", " ").title(),
- value=escape_markdown(field_data),
- inline=False,
- )
+ embed.add_field(
+ name=field.replace("_", " ").title(),
+ value=escape_markdown(field_data),
+ inline=False,
+ )
- else:
- embed.description = "There was an error when fetching your PyPi package."
- log.trace(f"Error when fetching PyPi package: {response.status}.")
+ error = False
- await ctx.send(embed=embed)
+ else:
+ embed.description = "There was an error when fetching your PyPi package."
+ log.trace(f"Error when fetching PyPi package: {response.status}.")
+
+ if error:
+ await ctx.send(embed=embed, delete_after=INVALID_INPUT_DELETE_DELAY)
+ await ctx.message.delete(delay=INVALID_INPUT_DELETE_DELAY)
+ else:
+ await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
|
{"golden_diff": "diff --git a/bot/exts/info/pypi.py b/bot/exts/info/pypi.py\n--- a/bot/exts/info/pypi.py\n+++ b/bot/exts/info/pypi.py\n@@ -8,7 +8,7 @@\n from discord.utils import escape_markdown\n \n from bot.bot import Bot\n-from bot.constants import Colours, NEGATIVE_REPLIES\n+from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput\n \n URL = \"https://pypi.org/pypi/{package}/json\"\n FIELDS = (\"author\", \"requires_python\", \"summary\", \"license\")\n@@ -17,6 +17,7 @@\n PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))\n \n ILLEGAL_CHARACTERS = re.compile(r\"[^a-zA-Z0-9-.]+\")\n+INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay\n \n log = logging.getLogger(__name__)\n \n@@ -36,42 +37,49 @@\n )\n embed.set_thumbnail(url=PYPI_ICON)\n \n+ error = True\n+\n if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:\n embed.description = f\"Illegal character passed into command: '{escape_markdown(character.group(0))}'\"\n- await ctx.send(embed=embed)\n- return\n \n- async with self.bot.http_session.get(URL.format(package=package)) as response:\n- if response.status == 404:\n- embed.description = \"Package could not be found.\"\n+ else:\n+ async with self.bot.http_session.get(URL.format(package=package)) as response:\n+ if response.status == 404:\n+ embed.description = \"Package could not be found.\"\n \n- elif response.status == 200 and response.content_type == \"application/json\":\n- response_json = await response.json()\n- info = response_json[\"info\"]\n+ elif response.status == 200 and response.content_type == \"application/json\":\n+ response_json = await response.json()\n+ info = response_json[\"info\"]\n \n- embed.title = f\"{info['name']} v{info['version']}\"\n- embed.url = info['package_url']\n- embed.colour = next(PYPI_COLOURS)\n+ embed.title = f\"{info['name']} v{info['version']}\"\n+ embed.url = info['package_url']\n+ embed.colour = next(PYPI_COLOURS)\n \n- for field in FIELDS:\n- field_data = info[field]\n+ for field in FIELDS:\n+ field_data = info[field]\n \n- # Field could be completely empty, in some cases can be a string with whitespaces, or None.\n- if field_data and not field_data.isspace():\n- if '\\n' in field_data and field == \"license\":\n- field_data = field_data.split('\\n')[0]\n+ # Field could be completely empty, in some cases can be a string with whitespaces, or None.\n+ if field_data and not field_data.isspace():\n+ if '\\n' in field_data and field == \"license\":\n+ field_data = field_data.split('\\n')[0]\n \n- embed.add_field(\n- name=field.replace(\"_\", \" \").title(),\n- value=escape_markdown(field_data),\n- inline=False,\n- )\n+ embed.add_field(\n+ name=field.replace(\"_\", \" \").title(),\n+ value=escape_markdown(field_data),\n+ inline=False,\n+ )\n \n- else:\n- embed.description = \"There was an error when fetching your PyPi package.\"\n- log.trace(f\"Error when fetching PyPi package: {response.status}.\")\n+ error = False\n \n- await ctx.send(embed=embed)\n+ else:\n+ embed.description = \"There was an error when fetching your PyPi package.\"\n+ log.trace(f\"Error when fetching PyPi package: {response.status}.\")\n+\n+ if error:\n+ await ctx.send(embed=embed, delete_after=INVALID_INPUT_DELETE_DELAY)\n+ await ctx.message.delete(delay=INVALID_INPUT_DELETE_DELAY)\n+ else:\n+ await ctx.send(embed=embed)\n \n \n def setup(bot: Bot) -> None:\n", "issue": "Delete messages when `!pypi` module is invalid\nWhen users use the `!pypi` command with an invalid module, you currently get a response saying 
`\"Package could not be found.\"`. \r\n\r\nWhat I'm proposing it that after a delay (I believe we have `bot.constants.RedirectOutput.delete_delay` for this) the bot should delete both its response and the invocation message, like [`!d` currently does](https://github.com/python-discord/bot/blob/master/bot/exts/info/doc.py#L363-L365).\r\n\r\nThis would help reduce channel flooding when someone is trying to remember the name of a module, and since it's not a trivial change I'm happy to try and implement this myself if it's something we decide we want.\n", "before_files": [{"content": "import itertools\nimport logging\nimport random\nimport re\n\nfrom discord import Embed\nfrom discord.ext.commands import Cog, Context, command\nfrom discord.utils import escape_markdown\n\nfrom bot.bot import Bot\nfrom bot.constants import Colours, NEGATIVE_REPLIES\n\nURL = \"https://pypi.org/pypi/{package}/json\"\nFIELDS = (\"author\", \"requires_python\", \"summary\", \"license\")\nPYPI_ICON = \"https://cdn.discordapp.com/emojis/766274397257334814.png\"\n\nPYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))\n\nILLEGAL_CHARACTERS = re.compile(r\"[^a-zA-Z0-9-.]+\")\n\nlog = logging.getLogger(__name__)\n\n\nclass PyPi(Cog):\n \"\"\"Cog for getting information about PyPi packages.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @command(name=\"pypi\", aliases=(\"package\", \"pack\"))\n async def get_package_info(self, ctx: Context, package: str) -> None:\n \"\"\"Provide information about a specific package from PyPI.\"\"\"\n embed = Embed(\n title=random.choice(NEGATIVE_REPLIES),\n colour=Colours.soft_red\n )\n embed.set_thumbnail(url=PYPI_ICON)\n\n if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:\n embed.description = f\"Illegal character passed into command: '{escape_markdown(character.group(0))}'\"\n await ctx.send(embed=embed)\n return\n\n async with self.bot.http_session.get(URL.format(package=package)) as response:\n if response.status == 404:\n embed.description = \"Package could not be found.\"\n\n elif response.status == 200 and response.content_type == \"application/json\":\n response_json = await response.json()\n info = response_json[\"info\"]\n\n embed.title = f\"{info['name']} v{info['version']}\"\n embed.url = info['package_url']\n embed.colour = next(PYPI_COLOURS)\n\n for field in FIELDS:\n field_data = info[field]\n\n # Field could be completely empty, in some cases can be a string with whitespaces, or None.\n if field_data and not field_data.isspace():\n if '\\n' in field_data and field == \"license\":\n field_data = field_data.split('\\n')[0]\n\n embed.add_field(\n name=field.replace(\"_\", \" \").title(),\n value=escape_markdown(field_data),\n inline=False,\n )\n\n else:\n embed.description = \"There was an error when fetching your PyPi package.\"\n log.trace(f\"Error when fetching PyPi package: {response.status}.\")\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the PyPi cog.\"\"\"\n bot.add_cog(PyPi(bot))\n", "path": "bot/exts/info/pypi.py"}]}
| 1,497 | 924 |
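(Editorial aside, not part of the record.) The fix above leans on two standard discord.py calls: `delete_after=` on the bot's reply and a delayed delete of the invoking message. A stripped-down sketch of that pattern; the command body, names, and delay value are placeholders rather than the bot's real logic:

```python
from discord.ext.commands import Cog, Context, command

DELETE_DELAY = 10  # stand-in for bot.constants.RedirectOutput.delete_delay (seconds)


class Demo(Cog):
    @command(name="demo")
    async def demo(self, ctx: Context, package: str) -> None:
        error = not package.isidentifier()  # placeholder validity check
        if error:
            # Clean up both the bot's reply and the user's invocation after a delay.
            await ctx.send("Package could not be found.", delete_after=DELETE_DELAY)
            await ctx.message.delete(delay=DELETE_DELAY)
        else:
            await ctx.send(f"Looking up {package} on PyPI...")
```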
gh_patches_debug_58079 | rasdani/github-patches | git_diff | secondmind-labs__trieste-140 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ensure type information is packaged with installable
**Describe the feature you'd like**
As a user, I want to be able to type check my code that calls into trieste. It's not clear at the moment if this is currently possible, given there is no py.typed file.
See the python PEP for more info https://www.python.org/dev/peps/pep-0561/
</issue>
<code>
[start of setup.py]
1 # Copyright 2020 The Trieste Contributors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import find_packages, setup
16
17 with open("README.md", "r") as file:
18 long_description = file.read()
19
20 setup(
21 name="trieste",
22 version="0.3.1",
23 author="The Trieste contributors",
24 author_email="[email protected]",
25 description="A Bayesian optimization research toolbox built on TensorFlow",
26 long_description=long_description,
27 long_description_content_type="text/markdown",
28 url="https://github.com/secondmind-labs/trieste",
29 packages=find_packages(include=("trieste*",)),
30 classifiers=[
31 "Programming Language :: Python :: 3.7",
32 "License :: OSI Approved :: Apache Software License",
33 "Operating System :: OS Independent",
34 ],
35 python_requires="~=3.7",
36 install_requires=[
37 "absl-py",
38 "gpflow==2.1.*",
39 "numpy",
40 # tensorflow!=2.2.0,!=2.3.0 because of https://github.com/advisories/GHSA-8fxw-76px-3rxv
41 "tensorflow>=2.1,!=2.2.0,!=2.3.0",
42 "tensorflow-probability>=0.9",
43 ],
44 )
45
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,6 +27,9 @@
long_description_content_type="text/markdown",
url="https://github.com/secondmind-labs/trieste",
packages=find_packages(include=("trieste*",)),
+ package_data={
+ "trieste": ["py.typed"],
+ },
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,6 +27,9 @@\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/secondmind-labs/trieste\",\n packages=find_packages(include=(\"trieste*\",)),\n+ package_data={\n+ \"trieste\": [\"py.typed\"],\n+ },\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: Apache Software License\",\n", "issue": "Ensure type information is packaged with installable\n**Describe the feature you'd like**\r\nAs a user, I want to be able to type check my code that calls into trieste. It's not clear at the moment if this is currently possible, given there is no py.typed file.\r\n\r\nSee the python PEP for more info https://www.python.org/dev/peps/pep-0561/\n", "before_files": [{"content": "# Copyright 2020 The Trieste Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as file:\n long_description = file.read()\n\nsetup(\n name=\"trieste\",\n version=\"0.3.1\",\n author=\"The Trieste contributors\",\n author_email=\"[email protected]\",\n description=\"A Bayesian optimization research toolbox built on TensorFlow\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/secondmind-labs/trieste\",\n packages=find_packages(include=(\"trieste*\",)),\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\"~=3.7\",\n install_requires=[\n \"absl-py\",\n \"gpflow==2.1.*\",\n \"numpy\",\n # tensorflow!=2.2.0,!=2.3.0 because of https://github.com/advisories/GHSA-8fxw-76px-3rxv\n \"tensorflow>=2.1,!=2.2.0,!=2.3.0\",\n \"tensorflow-probability>=0.9\",\n ],\n)\n", "path": "setup.py"}]}
| 1,096 | 115 |
gh_patches_debug_19184
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-3044
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add OpenAPI Specification for the /datafiles/ endpoint and fix the incorrect specs
## Problem
In order to ensure the accuracy of the specs generated by drf-spectacular for /datafiles/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page and also confirm it with the front-end team.
## Proposed solution
* Implement a custom preprocessing hook function to filter out all endpoints except for the /datafiles/ endpoint. The hook function selectively retains only the endpoint paths that match the /datafiles/
* Update the Spectacular settings in the _common_settings.py_ file to include the custom preprocessing hook.
* By configuring the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /datafiles/ endpoint is considered during the OpenAPI specification generation process.
* Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command
</issue>
<code>
[start of config/settings/common_settings.py]
1 """
2 Base settings to build other settings files upon.
3
4 Generated by 'django-admin startproject' using Django 3.1.7.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.1/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.1/ref/settings/
11 """
12
13 import os
14 from pathlib import Path
15
16 from decouple import Csv, config as decouple_config
17 from dj_database_url import parse as db_url
18
19
20 # We use a 'tuple' with pipes as delimiters as decople naively splits the global
21 # variables on commas when casting to Csv()
22 def pipe_delim(pipe_string):
23 # Remove opening and closing brackets
24 pipe_string = pipe_string[1:-1]
25 # Split on pipe delim
26 return pipe_string.split("|")
27
28
29 # Build paths inside the project like this: BASE_DIR / 'subdir'.
30 BASE_DIR = Path(__file__).resolve().parent.parent.parent
31
32 # Application definition
33
34 INSTALLED_APPS = [
35 "django.contrib.admin",
36 "django.contrib.auth",
37 "django.contrib.contenttypes",
38 "django.contrib.sessions",
39 "django.contrib.messages",
40 "whitenoise.runserver_nostatic",
41 "django.contrib.staticfiles",
42 "rest_framework",
43 "django_filters",
44 "django_property_filter",
45 "drf_spectacular",
46 "mathesar",
47 ]
48
49 MIDDLEWARE = [
50 "django.middleware.security.SecurityMiddleware",
51 "whitenoise.middleware.WhiteNoiseMiddleware",
52 "django.contrib.sessions.middleware.SessionMiddleware",
53 "django.middleware.common.CommonMiddleware",
54 "django.middleware.csrf.CsrfViewMiddleware",
55 "django.contrib.auth.middleware.AuthenticationMiddleware",
56 "django.contrib.messages.middleware.MessageMiddleware",
57 "django.middleware.clickjacking.XFrameOptionsMiddleware",
58 "mathesar.middleware.CursorClosedHandlerMiddleware",
59 "mathesar.middleware.PasswordChangeNeededMiddleware",
60 'django_userforeignkey.middleware.UserForeignKeyMiddleware',
61 'django_request_cache.middleware.RequestCacheMiddleware',
62 ]
63
64 ROOT_URLCONF = "config.urls"
65
66 TEMPLATES = [
67 {
68 "BACKEND": "django.template.backends.django.DjangoTemplates",
69 "DIRS": [],
70 "APP_DIRS": True,
71 "OPTIONS": {
72 "context_processors": [
73 "config.context_processors.frontend_settings",
74 "django.template.context_processors.debug",
75 "django.template.context_processors.request",
76 "django.contrib.auth.context_processors.auth",
77 "django.contrib.messages.context_processors.messages",
78 "mathesar.template_context_processors.base_template_extensions.script_extension_templates"
79 ],
80 },
81 },
82 ]
83
84 WSGI_APPLICATION = "config.wsgi.application"
85
86 # Database
87 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases
88
89 # TODO: Add to documentation that database keys should not be than 128 characters.
90
91 # MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'
92 # See pipe_delim above for why we use pipes as delimiters
93 DATABASES = {
94 db_key: db_url(url_string)
95 for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))
96 }
97 DATABASES[decouple_config('DJANGO_DATABASE_KEY', default="default")] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)
98
99 for db_key, db_dict in DATABASES.items():
100 # Engine can be '.postgresql' or '.postgresql_psycopg2'
101 if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):
102 raise ValueError(
103 f"{db_key} is not a PostgreSQL database. "
104 f"{db_dict['ENGINE']} found for {db_key}'s engine."
105 )
106
107 # pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'
108 # and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']
109 TEST = decouple_config('TEST', default=False, cast=bool)
110 if TEST:
111 for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):
112 DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}
113
114
115 # SECURITY WARNING: keep the secret key used in production secret!
116 SECRET_KEY = decouple_config('SECRET_KEY')
117
118 # SECURITY WARNING: don't run with debug turned on in production!
119 DEBUG = decouple_config('DEBUG', default=False, cast=bool)
120
121 ALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())
122
123 # Password validation
124 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
125
126 AUTH_PASSWORD_VALIDATORS = [
127 {
128 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
129 },
130 {
131 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
132 },
133 {
134 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
135 },
136 {
137 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
138 },
139 ]
140
141 # Internationalization
142 # https://docs.djangoproject.com/en/3.1/topics/i18n/
143
144 LANGUAGE_CODE = "en-us"
145
146 TIME_ZONE = "UTC"
147
148 USE_I18N = True
149
150 USE_L10N = True
151
152 USE_TZ = True
153
154 # Static files (CSS, JavaScript, Images)
155 # https://docs.djangoproject.com/en/3.1/howto/static-files/
156 # https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/
157
158 STATIC_URL = "/static/"
159
160 # When running with DEBUG=False, the webserver needs to serve files from this location
161 # python manage.py collectstatic has to be run to collect all static files into this location
162 # The files need to served in brotli or gzip compressed format
163 STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
164
165 # Media files (uploaded by the user)
166
167 MEDIA_ROOT = os.path.join(BASE_DIR, '.media/')
168
169 MEDIA_URL = "/media/"
170
171 # Update Authentication classes, removed BasicAuthentication
172 # Defaults: https://www.django-rest-framework.org/api-guide/settings/
173 REST_FRAMEWORK = {
174 'DEFAULT_AUTHENTICATION_CLASSES': [
175 'rest_framework.authentication.TokenAuthentication',
176 'rest_framework.authentication.SessionAuthentication'
177 ],
178 'DEFAULT_PERMISSION_CLASSES': [
179 'rest_framework.permissions.IsAuthenticated',
180 ],
181 'DEFAULT_FILTER_BACKENDS': (
182 'django_filters.rest_framework.DjangoFilterBackend',
183 'rest_framework.filters.OrderingFilter',
184 ),
185 'TEST_REQUEST_DEFAULT_FORMAT': 'json',
186 'EXCEPTION_HANDLER':
187 'mathesar.exception_handlers.mathesar_exception_handler',
188 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema'
189 }
190 SPECTACULAR_SETTINGS = {
191 'TITLE': 'Mathesar API',
192 'DESCRIPTION': '',
193 'VERSION': '1.0.0',
194 'SERVE_INCLUDE_SCHEMA': False,
195 'POSTPROCESSING_HOOKS': [
196 'config.settings.openapi.remove_url_prefix_hook',
197 ],
198 # OTHER SETTINGS
199 }
200 FRIENDLY_ERRORS = {
201 'FIELD_ERRORS': {
202 # By default drf-friendly-errors does contain error codes for ListSerializer type
203 'ListSerializer': {
204 'required': 2007,
205 'null': 2027,
206 'invalid_choice': 2083,
207 'not_a_list': 2123,
208 'empty': 2093
209 },
210 'PermittedPkRelatedField': {
211 'required': 2007,
212 'null': 2027,
213 'does_not_exist': 2151,
214 'incorrect_type': 2161
215 },
216 'PermittedSlugRelatedField': {
217 'required': 2007, 'invalid': 2002, 'null': 2027,
218 'does_not_exist': 2151, 'incorrect_type': 2161
219 },
220 },
221 'EXCEPTION_DICT': {
222 'Http404': 4005
223 }
224 }
225 # Mathesar settings
226 MATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')
227 MATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')
228 MATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')
229 MATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'
230 MATHESAR_UI_SOURCE_LOCATION = os.path.join(BASE_DIR, 'mathesar_ui/')
231 MATHESAR_CAPTURE_UNHANDLED_EXCEPTION = decouple_config('CAPTURE_UNHANDLED_EXCEPTION', default=False)
232 MATHESAR_STATIC_NON_CODE_FILES_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/non-code/')
233
234 # UI source files have to be served by Django in order for static assets to be included during dev mode
235 # https://vitejs.dev/guide/assets.html
236 # https://vitejs.dev/guide/backend-integration.html
237 STATICFILES_DIRS = [MATHESAR_UI_SOURCE_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION] if MATHESAR_MODE == 'DEVELOPMENT' else [MATHESAR_UI_BUILD_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION]
238 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
239
240 # Accounts
241 AUTH_USER_MODEL = 'mathesar.User'
242 LOGIN_URL = '/auth/login/'
243 LOGIN_REDIRECT_URL = '/'
244 LOGOUT_REDIRECT_URL = LOGIN_URL
245 DRF_ACCESS_POLICY = {
246 'reusable_conditions': ['mathesar.api.permission_conditions']
247 }
248 # List of Template names that contains additional script tags to be added to the base template
249 BASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES = []
250
[end of config/settings/common_settings.py]
[start of config/settings/openapi.py]
1 def remove_url_prefix_hook(result, **kwargs):
2 # Remove namespace and version URL prefix from the operation Id of the generated API schema
3 for path, path_info in result['paths'].items():
4 for method, operation in path_info.items():
5 operation_id = operation.get('operationId')
6 if operation_id:
7 if path.startswith('/api/db/v0/'):
8 operation['operationId'] = operation_id.replace('db_v0_', '')
9 elif path.startswith('/api/ui/v0/'):
10 operation['operationId'] = operation_id.replace('ui_v0_', '')
11
12 return result
13
[end of config/settings/openapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/config/settings/common_settings.py b/config/settings/common_settings.py
--- a/config/settings/common_settings.py
+++ b/config/settings/common_settings.py
@@ -192,6 +192,7 @@
'DESCRIPTION': '',
'VERSION': '1.0.0',
'SERVE_INCLUDE_SCHEMA': False,
+ 'PREPROCESSING_HOOKS': ['config.settings.openapi.custom_preprocessing_hook'],
'POSTPROCESSING_HOOKS': [
'config.settings.openapi.remove_url_prefix_hook',
],
diff --git a/config/settings/openapi.py b/config/settings/openapi.py
--- a/config/settings/openapi.py
+++ b/config/settings/openapi.py
@@ -1,3 +1,12 @@
+def custom_preprocessing_hook(endpoints):
+ filtered = []
+ for (path, path_regex, method, callback) in endpoints:
+ # Remove all but DRF API endpoints
+ if path.startswith("/api/db/v0/data_files/"):
+ filtered.append((path, path_regex, method, callback))
+ return filtered
+
+
def remove_url_prefix_hook(result, **kwargs):
# Remove namespace and version URL prefix from the operation Id of the generated API schema
for path, path_info in result['paths'].items():
|
{"golden_diff": "diff --git a/config/settings/common_settings.py b/config/settings/common_settings.py\n--- a/config/settings/common_settings.py\n+++ b/config/settings/common_settings.py\n@@ -192,6 +192,7 @@\n 'DESCRIPTION': '',\n 'VERSION': '1.0.0',\n 'SERVE_INCLUDE_SCHEMA': False,\n+ 'PREPROCESSING_HOOKS': ['config.settings.openapi.custom_preprocessing_hook'],\n 'POSTPROCESSING_HOOKS': [\n 'config.settings.openapi.remove_url_prefix_hook',\n ],\ndiff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -1,3 +1,12 @@\n+def custom_preprocessing_hook(endpoints):\n+ filtered = []\n+ for (path, path_regex, method, callback) in endpoints:\n+ # Remove all but DRF API endpoints\n+ if path.startswith(\"/api/db/v0/data_files/\"):\n+ filtered.append((path, path_regex, method, callback))\n+ return filtered\n+\n+\n def remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n", "issue": "Add OpenAPI Specification for the /datafiles/ endpoint and fix the incorrect specs\n## Problem\r\nIn order to ensure the accuracy of the specs generated by drf-spectacular for /datafiles/ endpoint , we will generate an OpenAPI Specification specifically for this endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page and also confirm it with the front-end team.\r\n\r\n## Proposed solution\r\n* Implement custom preprocessing hook function to filter out all endpoints except for the /datafiles/ endpoint.The hook function selectively retains only the endpoint paths that match the /datafiles/ \r\n* Update the Spectacular settings in the _common_settings.py_ file to include the custom preprocessing hook.\r\n* Configure the PREPROCESSING_HOOKS setting with the custom hook function, we ensure that only the /datafiles/ endpoint is considered during the OpenAPI specification generation process.\r\n* Finally, generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nBase settings to build other settings files upon.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"drf_spectacular\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n 
\"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"mathesar.middleware.CursorClosedHandlerMiddleware\",\n \"mathesar.middleware.PasswordChangeNeededMiddleware\",\n 'django_userforeignkey.middleware.UserForeignKeyMiddleware',\n 'django_request_cache.middleware.RequestCacheMiddleware',\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.frontend_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"mathesar.template_context_processors.base_template_extensions.script_extension_templates\"\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY', default=\"default\")] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\nfor db_key, db_dict in DATABASES.items():\n # Engine can be '.postgresql' or '.postgresql_psycopg2'\n if not db_dict['ENGINE'].startswith('django.db.backends.postgresql'):\n raise ValueError(\n f\"{db_key} is not a PostgreSQL database. 
\"\n f\"{db_dict['ENGINE']} found for {db_key}'s engine.\"\n )\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nTEST = decouple_config('TEST', default=False, cast=bool)\nif TEST:\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/\n\nSTATIC_URL = \"/static/\"\n\n# When running with DEBUG=False, the webserver needs to serve files from this location\n# python manage.py collectstatic has to be run to collect all static files into this location\n# The files need to served in brotli or gzip compressed format\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n\n# Update Authentication classes, removed BasicAuthentication\n# Defaults: https://www.django-rest-framework.org/api-guide/settings/\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n 'rest_framework.authentication.SessionAuthentication'\n ],\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_FILTER_BACKENDS': (\n 'django_filters.rest_framework.DjangoFilterBackend',\n 'rest_framework.filters.OrderingFilter',\n ),\n 'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n 'EXCEPTION_HANDLER':\n 'mathesar.exception_handlers.mathesar_exception_handler',\n 'DEFAULT_SCHEMA_CLASS': 'drf_spectacular.openapi.AutoSchema'\n}\nSPECTACULAR_SETTINGS = {\n 'TITLE': 'Mathesar API',\n 'DESCRIPTION': '',\n 'VERSION': '1.0.0',\n 'SERVE_INCLUDE_SCHEMA': False,\n 'POSTPROCESSING_HOOKS': [\n 'config.settings.openapi.remove_url_prefix_hook',\n ],\n # OTHER SETTINGS\n}\nFRIENDLY_ERRORS = {\n 'FIELD_ERRORS': {\n # By default drf-friendly-errors does contain error codes for ListSerializer type\n 'ListSerializer': {\n 'required': 2007,\n 'null': 2027,\n 'invalid_choice': 2083,\n 'not_a_list': 2123,\n 'empty': 2093\n },\n 'PermittedPkRelatedField': {\n 'required': 2007,\n 'null': 2027,\n 'does_not_exist': 2151,\n 'incorrect_type': 2161\n },\n 'PermittedSlugRelatedField': {\n 'required': 2007, 'invalid': 2002, 'null': 2027,\n 'does_not_exist': 2151, 'incorrect_type': 2161\n },\n },\n 
'EXCEPTION_DICT': {\n 'Http404': 4005\n }\n}\n# Mathesar settings\nMATHESAR_MODE = decouple_config('MODE', default='PRODUCTION')\nMATHESAR_UI_BUILD_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/mathesar/')\nMATHESAR_MANIFEST_LOCATION = os.path.join(MATHESAR_UI_BUILD_LOCATION, 'manifest.json')\nMATHESAR_CLIENT_DEV_URL = 'http://localhost:3000'\nMATHESAR_UI_SOURCE_LOCATION = os.path.join(BASE_DIR, 'mathesar_ui/')\nMATHESAR_CAPTURE_UNHANDLED_EXCEPTION = decouple_config('CAPTURE_UNHANDLED_EXCEPTION', default=False)\nMATHESAR_STATIC_NON_CODE_FILES_LOCATION = os.path.join(BASE_DIR, 'mathesar/static/non-code/')\n\n# UI source files have to be served by Django in order for static assets to be included during dev mode\n# https://vitejs.dev/guide/assets.html\n# https://vitejs.dev/guide/backend-integration.html\nSTATICFILES_DIRS = [MATHESAR_UI_SOURCE_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION] if MATHESAR_MODE == 'DEVELOPMENT' else [MATHESAR_UI_BUILD_LOCATION, MATHESAR_STATIC_NON_CODE_FILES_LOCATION]\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Accounts\nAUTH_USER_MODEL = 'mathesar.User'\nLOGIN_URL = '/auth/login/'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = LOGIN_URL\nDRF_ACCESS_POLICY = {\n 'reusable_conditions': ['mathesar.api.permission_conditions']\n}\n# List of Template names that contains additional script tags to be added to the base template\nBASE_TEMPLATE_ADDITIONAL_SCRIPT_TEMPLATES = []\n", "path": "config/settings/common_settings.py"}, {"content": "def remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
| 3,636 | 270 |
gh_patches_debug_10804
|
rasdani/github-patches
|
git_diff
|
MycroftAI__mycroft-core-2985
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add note about legal issues when using gTTS in public or commercial projects
originally opened in the OVOS plugin repo
> I've just posted an issue about potential legal issues with this API in the gTTS repo: [pndurette/gTTS#309](https://github.com/pndurette/gTTS/issues/309)
>
> From the plugin description people will only understand that the service can break anytime but there are more risks. Imagine someone will build a system that goes into "production" with this plugin active (you know how people are ^^) and commercializes it. In the best case Google will just ask the owner to shut it down, in the worst case they will get sued :-/.
>
> Personally I don't think an undocumented and unstable API moving in a legal gray area should be included in a system called OpenVoiceOS. For the same reasons it is not part of SEPIA OpenAssistant Framework.
>
> That said I know first hand how hard it is to find an alternative that has comparable quality, is free (=self-hosted) and fast enough to run on Raspberry Pi etc., but I think services like gTTS are just a distraction on the path to become really "open"!
links:
https://github.com/OpenVoiceOS/ovos-tts-plugin-google-tx/issues/1
https://github.com/pndurette/gTTS/issues/309
</issue>
<code>
[start of mycroft/tts/google_tts.py]
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from gtts import gTTS
16 from gtts.lang import tts_langs
17
18 from .tts import TTS, TTSValidator
19
20 from mycroft.util.log import LOG
21
22 # Live list of languages
23 # Cached list of supported languages (2021-02-09)
24 _default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',
25 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',
26 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',
27 'da': 'Danish', 'nl': 'Dutch', 'en': 'English',
28 'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',
29 'fi': 'Finnish', 'fr': 'French', 'de': 'German',
30 'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',
31 'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',
32 'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',
33 'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',
34 'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',
35 'ml': 'Malayalam', 'mr': 'Marathi',
36 'my': 'Myanmar (Burmese)', 'ne': 'Nepali',
37 'no': 'Norwegian', 'pl': 'Polish', 'pt': 'Portuguese',
38 'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',
39 'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',
40 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
41 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',
42 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',
43 'cy': 'Welsh', 'zh': 'Chinese (Mandarin/China)'
44 }
45
46
47 _supported_langs = None
48
49
50 def get_supported_langs():
51 """Get dict of supported languages.
52
53 Tries to fetch remote list, if that fails a local cache will be used.
54
55 Returns:
56 (dict): Lang code to lang name map.
57 """
58 global _supported_langs
59 if not _supported_langs:
60 try:
61 _supported_langs = tts_langs()
62 except Exception:
63 LOG.warning('Couldn\'t fetch upto date language codes')
64 return _supported_langs or _default_langs
65
66
67 class GoogleTTS(TTS):
68 """Interface to google TTS."""
69 def __init__(self, lang, config):
70 self._google_lang = None
71 super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(
72 self), 'mp3')
73
74 @property
75 def google_lang(self):
76 """Property containing a converted language code suitable for gTTS."""
77 supported_langs = get_supported_langs()
78 if not self._google_lang:
79 if self.lang.lower() in supported_langs:
80 self._google_lang = self.lang.lower()
81 elif self.lang[:2].lower() in supported_langs:
82 self._google_lang = self.lang[:2]
83 return self._google_lang or self.lang.lower()
84
85 def get_tts(self, sentence, wav_file):
86 """Fetch tts audio using gTTS.
87
88 Args:
89 sentence (str): Sentence to generate audio for
90 wav_file (str): output file path
91 Returns:
92 Tuple ((str) written file, None)
93 """
94 tts = gTTS(text=sentence, lang=self.google_lang)
95 tts.save(wav_file)
96 return (wav_file, None) # No phonemes
97
98
99 class GoogleTTSValidator(TTSValidator):
100 def __init__(self, tts):
101 super(GoogleTTSValidator, self).__init__(tts)
102
103 def validate_lang(self):
104 lang = self.tts.google_lang
105 if lang.lower() not in get_supported_langs():
106 raise ValueError("Language not supported by gTTS: {}".format(lang))
107
108 def validate_connection(self):
109 try:
110 gTTS(text='Hi').save(self.tts.filename)
111 except Exception:
112 raise Exception(
113 'GoogleTTS server could not be verified. Please check your '
114 'internet connection.')
115
116 def get_tts_class(self):
117 return GoogleTTS
118
[end of mycroft/tts/google_tts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py
--- a/mycroft/tts/google_tts.py
+++ b/mycroft/tts/google_tts.py
@@ -70,6 +70,13 @@
self._google_lang = None
super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(
self), 'mp3')
+ LOG.warning(
+ "The Google TTS module uses the gTTS Python package which itself "
+ "interfaces with the Google Translate text-to-speech API. This is "
+ "not intended for commercial or production usage. The service "
+ "may break at any time, and you are subject to their Terms of "
+ "Service that can be found at https://policies.google.com/terms"
+ )
@property
def google_lang(self):
|
{"golden_diff": "diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py\n--- a/mycroft/tts/google_tts.py\n+++ b/mycroft/tts/google_tts.py\n@@ -70,6 +70,13 @@\n self._google_lang = None\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n+ LOG.warning(\n+ \"The Google TTS module uses the gTTS Python package which itself \"\n+ \"interfaces with the Google Translate text-to-speech API. This is \"\n+ \"not intended for commercial or production usage. The service \"\n+ \"may break at any time, and you are subject to their Terms of \"\n+ \"Service that can be found at https://policies.google.com/terms\"\n+ )\n \n @property\n def google_lang(self):\n", "issue": "Add note about legal issues when using gTTS in public or commercial projects\noriginally opened in the OVOS plugin repo\r\n\r\n> I've just posted an issue about potential legal issues with this API in the gTTS repo: [pndurette/gTTS#309](https://github.com/pndurette/gTTS/issues/309)\r\n> \r\n> From the plugin description people will only understand that the service can break anytime but there are more risks. Imagine someone will build a system that goes into \"production\" with this plugin active (you know how people are ^^) and commercializes it. In the best case Google will just ask the owner to shut it down, in the worst case they will get sued :-/.\r\n> \r\n> Personally I don't think an undocumented and unstable API moving in a legal gray area should be included in a system called OpenVoiceOS. For the same reasons it is not part of SEPIA OpenAssistant Framework.\r\n> \r\n> That said I know first hand how hard it is to find an alternative that has comparable quality, is free (=self-hosted) and fast enough to run on Raspberry Pi etc., but I think services like gTTS are just a distraction on the path to become really \"open\"!\r\n\r\n\r\n\r\nlinks:\r\n\r\nhttps://github.com/OpenVoiceOS/ovos-tts-plugin-google-tx/issues/1\r\nhttps://github.com/pndurette/gTTS/issues/309\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom gtts import gTTS\nfrom gtts.lang import tts_langs\n\nfrom .tts import TTS, TTSValidator\n\nfrom mycroft.util.log import LOG\n\n# Live list of languages\n# Cached list of supported languages (2021-02-09)\n_default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',\n 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',\n 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',\n 'da': 'Danish', 'nl': 'Dutch', 'en': 'English',\n 'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',\n 'fi': 'Finnish', 'fr': 'French', 'de': 'German',\n 'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',\n 'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',\n 'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',\n 'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',\n 'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',\n 'ml': 'Malayalam', 'mr': 'Marathi',\n 'my': 'Myanmar (Burmese)', 'ne': 'Nepali',\n 'no': 'Norwegian', 'pl': 
'Polish', 'pt': 'Portuguese',\n 'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',\n 'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',\n 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',\n 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',\n 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',\n 'cy': 'Welsh', 'zh': 'Chinese (Mandarin/China)'\n }\n\n\n_supported_langs = None\n\n\ndef get_supported_langs():\n \"\"\"Get dict of supported languages.\n\n Tries to fetch remote list, if that fails a local cache will be used.\n\n Returns:\n (dict): Lang code to lang name map.\n \"\"\"\n global _supported_langs\n if not _supported_langs:\n try:\n _supported_langs = tts_langs()\n except Exception:\n LOG.warning('Couldn\\'t fetch upto date language codes')\n return _supported_langs or _default_langs\n\n\nclass GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n self._google_lang = None\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n\n @property\n def google_lang(self):\n \"\"\"Property containing a converted language code suitable for gTTS.\"\"\"\n supported_langs = get_supported_langs()\n if not self._google_lang:\n if self.lang.lower() in supported_langs:\n self._google_lang = self.lang.lower()\n elif self.lang[:2].lower() in supported_langs:\n self._google_lang = self.lang[:2]\n return self._google_lang or self.lang.lower()\n\n def get_tts(self, sentence, wav_file):\n \"\"\"Fetch tts audio using gTTS.\n\n Args:\n sentence (str): Sentence to generate audio for\n wav_file (str): output file path\n Returns:\n Tuple ((str) written file, None)\n \"\"\"\n tts = gTTS(text=sentence, lang=self.google_lang)\n tts.save(wav_file)\n return (wav_file, None) # No phonemes\n\n\nclass GoogleTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(GoogleTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n lang = self.tts.google_lang\n if lang.lower() not in get_supported_langs():\n raise ValueError(\"Language not supported by gTTS: {}\".format(lang))\n\n def validate_connection(self):\n try:\n gTTS(text='Hi').save(self.tts.filename)\n except Exception:\n raise Exception(\n 'GoogleTTS server could not be verified. Please check your '\n 'internet connection.')\n\n def get_tts_class(self):\n return GoogleTTS\n", "path": "mycroft/tts/google_tts.py"}]}
| 2,270 | 195 |
gh_patches_debug_27808
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-2069
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support 2-pronoun sets in the pronoun module
### The problem
Setting pronouns in the pronouns module can be unintuitive because the bot accepts either a 1-pronoun "object" command (`.setpronouns she`) or a long 5-pronoun "subject/object/possessive-determiner/possessive-pronoun/reflexive" command (`.setpronouns she/her/her/hers/herself`). However, in my experience most people are used to the 2-pronoun convention of "subject/object" (`.setpronouns she/her`).
### The solution
Supporting 2-word version of pronouns.
### Alternatives
An alternative would be to improve the message for unrecognised pronoun sets by adding explicit instructions to use either 1- or 5-pronoun sets.
</issue>
<code>
[start of sopel/modules/pronouns.py]
1 # coding=utf-8
2 """
3 pronouns.py - Sopel Pronouns Plugin
4 Copyright © 2016, Elsie Powell
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import absolute_import, division, print_function, unicode_literals
10
11 from sopel import plugin
12
13
14 # Copied from pronoun.is, leaving a *lot* out. If
15 # https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that
16 # would be a lot easier.
17 KNOWN_SETS = {
18 'ze': 'ze/hir/hir/hirs/hirself',
19 'ze/hir': 'ze/hir/hir/hirs/hirself',
20 'ze/zir': 'ze/zir/zir/zirs/zirself',
21 'they': 'they/them/their/theirs/themselves',
22 'they/.../themselves': 'they/them/their/theirs/themselves',
23 'they/.../themself': 'they/them/their/theirs/themself',
24 'she': 'she/her/her/hers/herself',
25 'he': 'he/him/his/his/himself',
26 'xey': 'xey/xem/xyr/xyrs/xemself',
27 'sie': 'sie/hir/hir/hirs/hirself',
28 'it': 'it/it/its/its/itself',
29 'ey': 'ey/em/eir/eirs/eirslef',
30 }
31
32
33 @plugin.command('pronouns')
34 @plugin.example('.pronouns Embolalia')
35 def pronouns(bot, trigger):
36 """Show the pronouns for a given user, defaulting to the current user if left blank."""
37 if not trigger.group(3):
38 pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')
39 if pronouns:
40 say_pronouns(bot, trigger.nick, pronouns)
41 else:
42 bot.reply("I don't know your pronouns! You can set them with "
43 "{}setpronouns".format(bot.config.core.help_prefix))
44 else:
45 pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')
46 if pronouns:
47 say_pronouns(bot, trigger.group(3), pronouns)
48 elif trigger.group(3) == bot.nick:
49 # You can stuff an entry into the database manually for your bot's
50 # gender, but like… it's a bot.
51 bot.say(
52 "I am a bot. Beep boop. My pronouns are it/it/its/its/itself. "
53 "See https://pronoun.is/it for examples."
54 )
55 else:
56 bot.reply("I don't know {}'s pronouns. They can set them with "
57 "{}setpronouns".format(trigger.group(3),
58 bot.config.core.help_prefix))
59
60
61 def say_pronouns(bot, nick, pronouns):
62 for short, set_ in KNOWN_SETS.items():
63 if pronouns == set_:
64 break
65 short = pronouns
66
67 bot.say("{}'s pronouns are {}. See https://pronoun.is/{} for "
68 "examples.".format(nick, pronouns, short))
69
70
71 @plugin.command('setpronouns')
72 @plugin.example('.setpronouns they/them/their/theirs/themselves')
73 def set_pronouns(bot, trigger):
74 pronouns = trigger.group(2)
75 """Set your pronouns."""
76 if not pronouns:
77 bot.reply('What pronouns do you use?')
78 return
79
80 disambig = ''
81 if pronouns == 'they':
82 disambig = ' You can also use they/.../themself, if you prefer.'
83 pronouns = KNOWN_SETS.get(pronouns)
84 elif pronouns == 'ze':
85 disambig = ' I have ze/hir. If you meant ze/zir, you can use that instead.'
86 pronouns = KNOWN_SETS.get(pronouns)
87 elif len(pronouns.split('/')) != 5:
88 pronouns = KNOWN_SETS.get(pronouns)
89 if not pronouns:
90 bot.reply(
91 "I'm sorry, I don't know those pronouns. "
92 "You can give me a set I don't know by formatting it "
93 "subject/object/possessive-determiner/possessive-pronoun/"
94 "reflexive, as in: they/them/their/theirs/themselves"
95 )
96 return
97 bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)
98 bot.reply("Thanks for telling me!" + disambig)
99
[end of sopel/modules/pronouns.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py
--- a/sopel/modules/pronouns.py
+++ b/sopel/modules/pronouns.py
@@ -19,14 +19,21 @@
'ze/hir': 'ze/hir/hir/hirs/hirself',
'ze/zir': 'ze/zir/zir/zirs/zirself',
'they': 'they/them/their/theirs/themselves',
+ 'they/them': 'they/them/their/theirs/themselves',
'they/.../themselves': 'they/them/their/theirs/themselves',
'they/.../themself': 'they/them/their/theirs/themself',
'she': 'she/her/her/hers/herself',
+ 'she/her': 'she/her/her/hers/herself',
'he': 'he/him/his/his/himself',
+ 'he/him': 'he/him/his/his/himself',
'xey': 'xey/xem/xyr/xyrs/xemself',
+ 'xey/xem': 'xey/xem/xyr/xyrs/xemself',
'sie': 'sie/hir/hir/hirs/hirself',
+ 'sie/hir': 'sie/hir/hir/hirs/hirself',
'it': 'it/it/its/its/itself',
- 'ey': 'ey/em/eir/eirs/eirslef',
+ 'it/it': 'it/it/its/its/itself',
+ 'ey': 'ey/em/eir/eirs/eirself',
+ 'ey/em': 'ey/em/eir/eirs/eirself',
}
@@ -71,8 +78,8 @@
@plugin.command('setpronouns')
@plugin.example('.setpronouns they/them/their/theirs/themselves')
def set_pronouns(bot, trigger):
- pronouns = trigger.group(2)
"""Set your pronouns."""
+ pronouns = trigger.group(2)
if not pronouns:
bot.reply('What pronouns do you use?')
return
|
{"golden_diff": "diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py\n--- a/sopel/modules/pronouns.py\n+++ b/sopel/modules/pronouns.py\n@@ -19,14 +19,21 @@\n 'ze/hir': 'ze/hir/hir/hirs/hirself',\n 'ze/zir': 'ze/zir/zir/zirs/zirself',\n 'they': 'they/them/their/theirs/themselves',\n+ 'they/them': 'they/them/their/theirs/themselves',\n 'they/.../themselves': 'they/them/their/theirs/themselves',\n 'they/.../themself': 'they/them/their/theirs/themself',\n 'she': 'she/her/her/hers/herself',\n+ 'she/her': 'she/her/her/hers/herself',\n 'he': 'he/him/his/his/himself',\n+ 'he/him': 'he/him/his/his/himself',\n 'xey': 'xey/xem/xyr/xyrs/xemself',\n+ 'xey/xem': 'xey/xem/xyr/xyrs/xemself',\n 'sie': 'sie/hir/hir/hirs/hirself',\n+ 'sie/hir': 'sie/hir/hir/hirs/hirself',\n 'it': 'it/it/its/its/itself',\n- 'ey': 'ey/em/eir/eirs/eirslef',\n+ 'it/it': 'it/it/its/its/itself',\n+ 'ey': 'ey/em/eir/eirs/eirself',\n+ 'ey/em': 'ey/em/eir/eirs/eirself',\n }\n \n \n@@ -71,8 +78,8 @@\n @plugin.command('setpronouns')\n @plugin.example('.setpronouns they/them/their/theirs/themselves')\n def set_pronouns(bot, trigger):\n- pronouns = trigger.group(2)\n \"\"\"Set your pronouns.\"\"\"\n+ pronouns = trigger.group(2)\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n", "issue": "Support 2-pronoun sets in the pronoun module \n### The problem\r\n\r\nSetting pronouns in the pronouns module can be unintuitive because the bot accepts either a 1-pronoun \"object\" command (`.setpronouns she`) or a long 5-pronouns \"subject/object/possessive-determiner/possessive-pronoun/reflexive\" commnad (`.setpronouns she/her/her/hers/herself`). However, in my experience most people are used to the 2-pronoun convention of \"subject/object\" `.setpronouns she/her`). \r\n\r\n### The solution\r\n\r\nSupporting 2-word version of pronouns. \r\n\r\n### Alternatives\r\n\r\nAn alternative would be to improve the message for unrecognised pronoun sets by adding explicit instructions to use either 1- or 5-pronoun sets. \r\n\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\npronouns.py - Sopel Pronouns Plugin\nCopyright \u00a9 2016, Elsie Powell\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom sopel import plugin\n\n\n# Copied from pronoun.is, leaving a *lot* out. If\n# https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that\n# would be a lot easier.\nKNOWN_SETS = {\n 'ze': 'ze/hir/hir/hirs/hirself',\n 'ze/hir': 'ze/hir/hir/hirs/hirself',\n 'ze/zir': 'ze/zir/zir/zirs/zirself',\n 'they': 'they/them/their/theirs/themselves',\n 'they/.../themselves': 'they/them/their/theirs/themselves',\n 'they/.../themself': 'they/them/their/theirs/themself',\n 'she': 'she/her/her/hers/herself',\n 'he': 'he/him/his/his/himself',\n 'xey': 'xey/xem/xyr/xyrs/xemself',\n 'sie': 'sie/hir/hir/hirs/hirself',\n 'it': 'it/it/its/its/itself',\n 'ey': 'ey/em/eir/eirs/eirslef',\n}\n\n\[email protected]('pronouns')\[email protected]('.pronouns Embolalia')\ndef pronouns(bot, trigger):\n \"\"\"Show the pronouns for a given user, defaulting to the current user if left blank.\"\"\"\n if not trigger.group(3):\n pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.nick, pronouns)\n else:\n bot.reply(\"I don't know your pronouns! 
You can set them with \"\n \"{}setpronouns\".format(bot.config.core.help_prefix))\n else:\n pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.group(3), pronouns)\n elif trigger.group(3) == bot.nick:\n # You can stuff an entry into the database manually for your bot's\n # gender, but like\u2026 it's a bot.\n bot.say(\n \"I am a bot. Beep boop. My pronouns are it/it/its/its/itself. \"\n \"See https://pronoun.is/it for examples.\"\n )\n else:\n bot.reply(\"I don't know {}'s pronouns. They can set them with \"\n \"{}setpronouns\".format(trigger.group(3),\n bot.config.core.help_prefix))\n\n\ndef say_pronouns(bot, nick, pronouns):\n for short, set_ in KNOWN_SETS.items():\n if pronouns == set_:\n break\n short = pronouns\n\n bot.say(\"{}'s pronouns are {}. See https://pronoun.is/{} for \"\n \"examples.\".format(nick, pronouns, short))\n\n\[email protected]('setpronouns')\[email protected]('.setpronouns they/them/their/theirs/themselves')\ndef set_pronouns(bot, trigger):\n pronouns = trigger.group(2)\n \"\"\"Set your pronouns.\"\"\"\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n\n disambig = ''\n if pronouns == 'they':\n disambig = ' You can also use they/.../themself, if you prefer.'\n pronouns = KNOWN_SETS.get(pronouns)\n elif pronouns == 'ze':\n disambig = ' I have ze/hir. If you meant ze/zir, you can use that instead.'\n pronouns = KNOWN_SETS.get(pronouns)\n elif len(pronouns.split('/')) != 5:\n pronouns = KNOWN_SETS.get(pronouns)\n if not pronouns:\n bot.reply(\n \"I'm sorry, I don't know those pronouns. \"\n \"You can give me a set I don't know by formatting it \"\n \"subject/object/possessive-determiner/possessive-pronoun/\"\n \"reflexive, as in: they/them/their/theirs/themselves\"\n )\n return\n bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)\n bot.reply(\"Thanks for telling me!\" + disambig)\n", "path": "sopel/modules/pronouns.py"}]}
| 1,926 | 496 |
gh_patches_debug_21787
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-3121
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warn on empty Fluent rich editor fields
> In addition to the bug, I believe this is a UX regression? We should show a warning that one of the values is empty when trying to save such message.
The current practice is to consider any content in a Fluent value or attribute to qualify the whole message as not-empty. The check for this was added in #2675. Previously empty attributes may indeed have resulted in errors, due to stringifying as invalid Fluent.
_Originally posted by @eemeli in https://github.com/mozilla/pontoon/issues/2973#issuecomment-1750344569_
</issue>
<code>
[start of pontoon/checks/libraries/pontoon_db.py]
1 import html
2 import re
3
4 import bleach
5
6 from collections import defaultdict
7 from fluent.syntax import FluentParser, ast
8 from fluent.syntax.visitor import Visitor
9
10 from pontoon.sync.formats.ftl import localizable_entries
11
12
13 MAX_LENGTH_RE = re.compile(r"MAX_LENGTH:( *)(\d+)", re.MULTILINE)
14 parser = FluentParser()
15
16
17 def get_max_length(comment):
18 """
19 Return max length value for an entity with MAX_LENTH.
20 """
21 max_length = re.findall(MAX_LENGTH_RE, comment or "")
22
23 if max_length:
24 return int(max_length[0][1])
25
26 return None
27
28
29 class IsEmptyVisitor(Visitor):
30 def __init__(self):
31 self.is_empty = True
32
33 def visit_Placeable(self, node):
34 if isinstance(node.expression, ast.Literal):
35 if node.expression.parse()["value"]:
36 self.is_empty = False
37 elif isinstance(node.expression, ast.SelectExpression):
38 self.generic_visit(node.expression)
39 else:
40 self.is_empty = False
41
42 def visit_TextElement(self, node):
43 if node.value:
44 self.is_empty = False
45
46
47 def run_checks(entity, original, string):
48 """
49 Group all checks related to the base UI that get stored in the DB
50 :arg pontoon.base.models.Entity entity: Source entity
51 :arg basestring original: an original string
52 :arg basestring string: a translation
53 """
54 checks = defaultdict(list)
55 resource_ext = entity.resource.format
56
57 if resource_ext == "lang":
58 # Newlines are not allowed in .lang files (bug 1190754)
59 if "\n" in string:
60 checks["pErrors"].append("Newline characters are not allowed")
61
62 # Prevent translations exceeding the given length limit
63 max_length = get_max_length(entity.comment)
64
65 if max_length:
66 string_length = len(
67 html.unescape(bleach.clean(string, strip=True, tags=()))
68 )
69
70 if string_length > max_length:
71 checks["pErrors"].append("Translation too long")
72
73 # Bug 1599056: Original and translation must either both end in a newline,
74 # or none of them should.
75 if resource_ext == "po":
76 if original.endswith("\n") != string.endswith("\n"):
77 checks["pErrors"].append("Ending newline mismatch")
78
79 # Prevent empty translation submissions if not supported
80 if string == "" and not entity.resource.allows_empty_translations:
81 checks["pErrors"].append("Empty translations are not allowed")
82
83 # FTL checks
84 if resource_ext == "ftl" and string != "":
85 translation_ast = parser.parse_entry(string)
86 entity_ast = parser.parse_entry(entity.string)
87
88 # Parse error
89 if isinstance(translation_ast, ast.Junk):
90 checks["pErrors"].append(translation_ast.annotations[0].message)
91
92 # Not a localizable entry
93 elif not isinstance(translation_ast, localizable_entries):
94 checks["pErrors"].append(
95 "Translation needs to be a valid localizable entry"
96 )
97
98 # Message ID mismatch
99 elif entity_ast.id.name != translation_ast.id.name:
100 checks["pErrors"].append("Translation key needs to match source string key")
101
102 # Empty translation entry warning; set here rather than pontoon_non_db.py
103 # to avoid needing to parse the Fluent message twice.
104 else:
105 visitor = IsEmptyVisitor()
106 visitor.visit(translation_ast)
107 if visitor.is_empty:
108 checks["pndbWarnings"].append("Empty translation")
109
110 return checks
111
[end of pontoon/checks/libraries/pontoon_db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pontoon/checks/libraries/pontoon_db.py b/pontoon/checks/libraries/pontoon_db.py
--- a/pontoon/checks/libraries/pontoon_db.py
+++ b/pontoon/checks/libraries/pontoon_db.py
@@ -28,20 +28,27 @@
class IsEmptyVisitor(Visitor):
def __init__(self):
- self.is_empty = True
+ self.is_empty = False
+ self.is_pattern_empty = True
+
+ def visit_Pattern(self, node):
+ self.is_pattern_empty = True
+ self.visit(node.elements)
+ if self.is_pattern_empty:
+ self.is_empty = True
def visit_Placeable(self, node):
if isinstance(node.expression, ast.Literal):
if node.expression.parse()["value"]:
- self.is_empty = False
+ self.is_pattern_empty = False
elif isinstance(node.expression, ast.SelectExpression):
self.generic_visit(node.expression)
else:
- self.is_empty = False
+ self.is_pattern_empty = False
def visit_TextElement(self, node):
if node.value:
- self.is_empty = False
+ self.is_pattern_empty = False
def run_checks(entity, original, string):
|
{"golden_diff": "diff --git a/pontoon/checks/libraries/pontoon_db.py b/pontoon/checks/libraries/pontoon_db.py\n--- a/pontoon/checks/libraries/pontoon_db.py\n+++ b/pontoon/checks/libraries/pontoon_db.py\n@@ -28,20 +28,27 @@\n \n class IsEmptyVisitor(Visitor):\n def __init__(self):\n- self.is_empty = True\n+ self.is_empty = False\n+ self.is_pattern_empty = True\n+\n+ def visit_Pattern(self, node):\n+ self.is_pattern_empty = True\n+ self.visit(node.elements)\n+ if self.is_pattern_empty:\n+ self.is_empty = True\n \n def visit_Placeable(self, node):\n if isinstance(node.expression, ast.Literal):\n if node.expression.parse()[\"value\"]:\n- self.is_empty = False\n+ self.is_pattern_empty = False\n elif isinstance(node.expression, ast.SelectExpression):\n self.generic_visit(node.expression)\n else:\n- self.is_empty = False\n+ self.is_pattern_empty = False\n \n def visit_TextElement(self, node):\n if node.value:\n- self.is_empty = False\n+ self.is_pattern_empty = False\n \n \n def run_checks(entity, original, string):\n", "issue": "Warn on empty Fluent rich editor fields\n> In addition to the bug, I believe this is a UX regression? We should show a warning that one of the values is empty when trying to save such message.\r\n\r\nThe current practice is to consider any content in a Fluent value or attribute to qualify the whole message as not-empty. The check for this was added in #2675. Previously empty attributes may indeed have resulted in errors, due to stringifying as invalid Fluent.\r\n\r\n_Originally posted by @eemeli in https://github.com/mozilla/pontoon/issues/2973#issuecomment-1750344569_\r\n \n", "before_files": [{"content": "import html\nimport re\n\nimport bleach\n\nfrom collections import defaultdict\nfrom fluent.syntax import FluentParser, ast\nfrom fluent.syntax.visitor import Visitor\n\nfrom pontoon.sync.formats.ftl import localizable_entries\n\n\nMAX_LENGTH_RE = re.compile(r\"MAX_LENGTH:( *)(\\d+)\", re.MULTILINE)\nparser = FluentParser()\n\n\ndef get_max_length(comment):\n \"\"\"\n Return max length value for an entity with MAX_LENTH.\n \"\"\"\n max_length = re.findall(MAX_LENGTH_RE, comment or \"\")\n\n if max_length:\n return int(max_length[0][1])\n\n return None\n\n\nclass IsEmptyVisitor(Visitor):\n def __init__(self):\n self.is_empty = True\n\n def visit_Placeable(self, node):\n if isinstance(node.expression, ast.Literal):\n if node.expression.parse()[\"value\"]:\n self.is_empty = False\n elif isinstance(node.expression, ast.SelectExpression):\n self.generic_visit(node.expression)\n else:\n self.is_empty = False\n\n def visit_TextElement(self, node):\n if node.value:\n self.is_empty = False\n\n\ndef run_checks(entity, original, string):\n \"\"\"\n Group all checks related to the base UI that get stored in the DB\n :arg pontoon.base.models.Entity entity: Source entity\n :arg basestring original: an original string\n :arg basestring string: a translation\n \"\"\"\n checks = defaultdict(list)\n resource_ext = entity.resource.format\n\n if resource_ext == \"lang\":\n # Newlines are not allowed in .lang files (bug 1190754)\n if \"\\n\" in string:\n checks[\"pErrors\"].append(\"Newline characters are not allowed\")\n\n # Prevent translations exceeding the given length limit\n max_length = get_max_length(entity.comment)\n\n if max_length:\n string_length = len(\n html.unescape(bleach.clean(string, strip=True, tags=()))\n )\n\n if string_length > max_length:\n checks[\"pErrors\"].append(\"Translation too long\")\n\n # Bug 1599056: Original and translation must either both end in a 
newline,\n # or none of them should.\n if resource_ext == \"po\":\n if original.endswith(\"\\n\") != string.endswith(\"\\n\"):\n checks[\"pErrors\"].append(\"Ending newline mismatch\")\n\n # Prevent empty translation submissions if not supported\n if string == \"\" and not entity.resource.allows_empty_translations:\n checks[\"pErrors\"].append(\"Empty translations are not allowed\")\n\n # FTL checks\n if resource_ext == \"ftl\" and string != \"\":\n translation_ast = parser.parse_entry(string)\n entity_ast = parser.parse_entry(entity.string)\n\n # Parse error\n if isinstance(translation_ast, ast.Junk):\n checks[\"pErrors\"].append(translation_ast.annotations[0].message)\n\n # Not a localizable entry\n elif not isinstance(translation_ast, localizable_entries):\n checks[\"pErrors\"].append(\n \"Translation needs to be a valid localizable entry\"\n )\n\n # Message ID mismatch\n elif entity_ast.id.name != translation_ast.id.name:\n checks[\"pErrors\"].append(\"Translation key needs to match source string key\")\n\n # Empty translation entry warning; set here rather than pontoon_non_db.py\n # to avoid needing to parse the Fluent message twice.\n else:\n visitor = IsEmptyVisitor()\n visitor.visit(translation_ast)\n if visitor.is_empty:\n checks[\"pndbWarnings\"].append(\"Empty translation\")\n\n return checks\n", "path": "pontoon/checks/libraries/pontoon_db.py"}]}
| 1,681 | 280 |
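The corrected `IsEmptyVisitor` above only reports a message as empty once one of its patterns — the value or any attribute — turns out to carry no content. A rough, self-contained sketch of that behaviour (an illustration, not the project's test code; it assumes `fluent.syntax` is installed and the example messages are made up):

```python
from fluent.syntax import FluentParser, ast
from fluent.syntax.visitor import Visitor

parser = FluentParser()


class IsEmptyVisitor(Visitor):
    def __init__(self):
        self.is_empty = False          # becomes True once any pattern is empty
        self.is_pattern_empty = True   # tracks the pattern currently being visited

    def visit_Pattern(self, node):
        self.is_pattern_empty = True
        self.visit(node.elements)
        if self.is_pattern_empty:
            self.is_empty = True

    def visit_Placeable(self, node):
        if isinstance(node.expression, ast.Literal):
            if node.expression.parse()["value"]:
                self.is_pattern_empty = False
        elif isinstance(node.expression, ast.SelectExpression):
            self.generic_visit(node.expression)
        else:
            self.is_pattern_empty = False

    def visit_TextElement(self, node):
        if node.value:
            self.is_pattern_empty = False


def is_empty(ftl_source):
    visitor = IsEmptyVisitor()
    visitor.visit(parser.parse_entry(ftl_source))
    return visitor.is_empty


print(is_empty("key = value\n"))                           # expected: False
print(is_empty('key = value\n    .accesskey = { "" }\n'))  # expected: True
```

With the previous single `is_empty` flag, the second message would have passed because its value carries text; the per-pattern flag is what lets the empty `.accesskey` attribute trigger the "Empty translation" warning.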
gh_patches_debug_10412
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-19382
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CenterCrop.compute_output_shape() does not consider the list compatibility
When the input is a list of tensors, CenterCrop correctly generates the output, but `CenterCrop.compute_output_shape()` returns a wrong shape. Please refer to this [gist](https://colab.research.google.com/drive/1JLuEr0-dNCmawTb2dRX7kaSxOKGZ6Obv?usp=sharing).
Interestingly, the test code in [center_crop_test.py](https://github.com/keras-team/keras/blob/master/keras/layers/preprocessing/center_crop_test.py#L178) already exercises list inputs, while its `compute_output_shape` function misses that case.
</issue>
<code>
[start of keras/layers/preprocessing/center_crop.py]
1 from keras import backend
2 from keras.api_export import keras_export
3 from keras.layers.preprocessing.tf_data_layer import TFDataLayer
4 from keras.utils import image_utils
5
6
7 @keras_export("keras.layers.CenterCrop")
8 class CenterCrop(TFDataLayer):
9 """A preprocessing layer which crops images.
10
11 This layers crops the central portion of the images to a target size. If an
12 image is smaller than the target size, it will be resized and cropped
13 so as to return the largest possible window in the image that matches
14 the target aspect ratio.
15
16 Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).
17
18 Input shape:
19 3D (unbatched) or 4D (batched) tensor with shape:
20 `(..., height, width, channels)`, in `"channels_last"` format,
21 or `(..., channels, height, width)`, in `"channels_first"` format.
22
23 Output shape:
24 3D (unbatched) or 4D (batched) tensor with shape:
25 `(..., target_height, target_width, channels)`,
26 or `(..., channels, target_height, target_width)`,
27 in `"channels_first"` format.
28
29 If the input height/width is even and the target height/width is odd (or
30 inversely), the input image is left-padded by 1 pixel.
31
32 **Note:** This layer is safe to use inside a `tf.data` pipeline
33 (independently of which backend you're using).
34
35 Args:
36 height: Integer, the height of the output shape.
37 width: Integer, the width of the output shape.
38 data_format: string, either `"channels_last"` or `"channels_first"`.
39 The ordering of the dimensions in the inputs. `"channels_last"`
40 corresponds to inputs with shape `(batch, height, width, channels)`
41 while `"channels_first"` corresponds to inputs with shape
42 `(batch, channels, height, width)`. It defaults to the
43 `image_data_format` value found in your Keras config file at
44 `~/.keras/keras.json`. If you never set it, then it will be
45 `"channels_last"`.
46 """
47
48 def __init__(self, height, width, data_format=None, **kwargs):
49 super().__init__(**kwargs)
50 self.height = height
51 self.width = width
52 self.data_format = backend.standardize_data_format(data_format)
53
54 def call(self, inputs):
55 inputs = self.backend.cast(inputs, self.compute_dtype)
56 if self.data_format == "channels_first":
57 init_height = inputs.shape[-2]
58 init_width = inputs.shape[-1]
59 else:
60 init_height = inputs.shape[-3]
61 init_width = inputs.shape[-2]
62
63 if init_height is None or init_width is None:
64 # Dynamic size case. TODO.
65 raise ValueError(
66 "At this time, CenterCrop can only "
67 "process images with a static spatial "
68 f"shape. Received: inputs.shape={inputs.shape}"
69 )
70
71 h_diff = init_height - self.height
72 w_diff = init_width - self.width
73
74 h_start = int(h_diff / 2)
75 w_start = int(w_diff / 2)
76
77 if h_diff >= 0 and w_diff >= 0:
78 if len(inputs.shape) == 4:
79 if self.data_format == "channels_first":
80 return inputs[
81 :,
82 :,
83 h_start : h_start + self.height,
84 w_start : w_start + self.width,
85 ]
86 return inputs[
87 :,
88 h_start : h_start + self.height,
89 w_start : w_start + self.width,
90 :,
91 ]
92 elif len(inputs.shape) == 3:
93 if self.data_format == "channels_first":
94 return inputs[
95 :,
96 h_start : h_start + self.height,
97 w_start : w_start + self.width,
98 ]
99 return inputs[
100 h_start : h_start + self.height,
101 w_start : w_start + self.width,
102 :,
103 ]
104
105 return image_utils.smart_resize(
106 inputs,
107 [self.height, self.width],
108 data_format=self.data_format,
109 backend_module=self.backend,
110 )
111
112 def compute_output_shape(self, input_shape):
113 input_shape = list(input_shape)
114 if len(input_shape) == 4:
115 if self.data_format == "channels_last":
116 input_shape[1] = self.height
117 input_shape[2] = self.width
118 else:
119 input_shape[2] = self.height
120 input_shape[3] = self.width
121 else:
122 if self.data_format == "channels_last":
123 input_shape[0] = self.height
124 input_shape[1] = self.width
125 else:
126 input_shape[1] = self.height
127 input_shape[2] = self.width
128 return tuple(input_shape)
129
130 def get_config(self):
131 base_config = super().get_config()
132 config = {
133 "height": self.height,
134 "width": self.width,
135 "data_format": self.data_format,
136 }
137 return {**base_config, **config}
138
[end of keras/layers/preprocessing/center_crop.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/keras/layers/preprocessing/center_crop.py b/keras/layers/preprocessing/center_crop.py
--- a/keras/layers/preprocessing/center_crop.py
+++ b/keras/layers/preprocessing/center_crop.py
@@ -111,6 +111,13 @@
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
+ if isinstance(input_shape[0], (list, tuple)) or len(
+ input_shape
+ ) not in (3, 4):
+ raise ValueError(
+ "`input_shape` must be a non-nested tuple or list "
+ "of rank-1 with size 3 (unbatched) or 4 (batched). "
+ )
if len(input_shape) == 4:
if self.data_format == "channels_last":
input_shape[1] = self.height
|
{"golden_diff": "diff --git a/keras/layers/preprocessing/center_crop.py b/keras/layers/preprocessing/center_crop.py\n--- a/keras/layers/preprocessing/center_crop.py\n+++ b/keras/layers/preprocessing/center_crop.py\n@@ -111,6 +111,13 @@\n \n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n+ if isinstance(input_shape[0], (list, tuple)) or len(\n+ input_shape\n+ ) not in (3, 4):\n+ raise ValueError(\n+ \"`input_shape` must be a non-nested tuple or list \"\n+ \"of rank-1 with size 3 (unbatched) or 4 (batched). \"\n+ )\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n", "issue": "CenterCrop.compute_output_shape() does not consider the list compatibility\nWhen the input is a list of tensor, CenterCrop can correctly generate the output, while` CenterCrop.compute_output_shape()` will generate a wrong shape. Please refer to this [gist](https://colab.research.google.com/drive/1JLuEr0-dNCmawTb2dRX7kaSxOKGZ6Obv?usp=sharing).\r\n\r\nInterestingly, I found the test code in [center_crop_test.py, ](https://github.com/keras-team/keras/blob/master/keras/layers/preprocessing/center_crop_test.py#L178) has tested the list compatibility, while its `compute_output_shape` function missed it.\n", "before_files": [{"content": "from keras import backend\nfrom keras.api_export import keras_export\nfrom keras.layers.preprocessing.tf_data_layer import TFDataLayer\nfrom keras.utils import image_utils\n\n\n@keras_export(\"keras.layers.CenterCrop\")\nclass CenterCrop(TFDataLayer):\n \"\"\"A preprocessing layer which crops images.\n\n This layers crops the central portion of the images to a target size. If an\n image is smaller than the target size, it will be resized and cropped\n so as to return the largest possible window in the image that matches\n the target aspect ratio.\n\n Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format,\n or `(..., channels, height, width)`, in `\"channels_first\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`,\n or `(..., channels, target_height, target_width)`,\n in `\"channels_first\"` format.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n **Note:** This layer is safe to use inside a `tf.data` pipeline\n (independently of which backend you're using).\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. 
If you never set it, then it will be\n `\"channels_last\"`.\n \"\"\"\n\n def __init__(self, height, width, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.height = height\n self.width = width\n self.data_format = backend.standardize_data_format(data_format)\n\n def call(self, inputs):\n inputs = self.backend.cast(inputs, self.compute_dtype)\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n else:\n init_height = inputs.shape[-3]\n init_width = inputs.shape[-2]\n\n if init_height is None or init_width is None:\n # Dynamic size case. TODO.\n raise ValueError(\n \"At this time, CenterCrop can only \"\n \"process images with a static spatial \"\n f\"shape. Received: inputs.shape={inputs.shape}\"\n )\n\n h_diff = init_height - self.height\n w_diff = init_width - self.width\n\n h_start = int(h_diff / 2)\n w_start = int(w_diff / 2)\n\n if h_diff >= 0 and w_diff >= 0:\n if len(inputs.shape) == 4:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n elif len(inputs.shape) == 3:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n\n return image_utils.smart_resize(\n inputs,\n [self.height, self.width],\n data_format=self.data_format,\n backend_module=self.backend,\n )\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n input_shape[2] = self.width\n else:\n input_shape[2] = self.height\n input_shape[3] = self.width\n else:\n if self.data_format == \"channels_last\":\n input_shape[0] = self.height\n input_shape[1] = self.width\n else:\n input_shape[1] = self.height\n input_shape[2] = self.width\n return tuple(input_shape)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n \"height\": self.height,\n \"width\": self.width,\n \"data_format\": self.data_format,\n }\n return {**base_config, **config}\n", "path": "keras/layers/preprocessing/center_crop.py"}]}
| 2,130 | 201 |
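The accepted fix guards `compute_output_shape` before any index is rewritten. The sketch below mirrors that patched logic as a plain function so the new error path can be exercised without building the Keras layer; the 180×180 target size, the function signature and the example shapes are assumptions made for illustration:

```python
def compute_output_shape(input_shape, height=180, width=180, data_format="channels_last"):
    input_shape = list(input_shape)
    # Guard added by the patch: a nested shape (one shape per tensor in a list
    # input) or a rank other than 3/4 now fails loudly instead of silently
    # producing a wrong shape.
    if isinstance(input_shape[0], (list, tuple)) or len(input_shape) not in (3, 4):
        raise ValueError(
            "`input_shape` must be a non-nested tuple or list "
            "of rank-1 with size 3 (unbatched) or 4 (batched)."
        )
    if len(input_shape) == 4:
        if data_format == "channels_last":
            input_shape[1], input_shape[2] = height, width
        else:
            input_shape[2], input_shape[3] = height, width
    else:
        if data_format == "channels_last":
            input_shape[0], input_shape[1] = height, width
        else:
            input_shape[1], input_shape[2] = height, width
    return tuple(input_shape)


print(compute_output_shape((None, 256, 256, 3)))  # (None, 180, 180, 3)

try:
    # A list of two image shapes, as produced when the layer is called on a
    # list of tensors.
    compute_output_shape([(None, 256, 256, 3), (None, 128, 128, 3)])
except ValueError as exc:
    print(exc)
```

Raising a clear error here is the behaviour the merged patch chose, rather than trying to return one output shape per element of the list.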
gh_patches_debug_9086
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-2439
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Seemingly unnecessary dictionary comprehension in contrib's BaseOutputHandler
Hi,
`output_dict` is already guaranteed to be a dictionary just above the line linked below.
https://github.com/pytorch/ignite/blob/21a2ccc25dd7ff0aec5f1538387597cb093fe36e/ignite/contrib/handlers/base_logger.py#L108
So this line could be replaced with:
```python
metrics_state_attrs.update(output_dict)
```
</issue>
<code>
[start of ignite/contrib/handlers/base_logger.py]
1 """Base logger and its helper handlers."""
2 import numbers
3 import warnings
4 from abc import ABCMeta, abstractmethod
5 from collections import OrderedDict
6 from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
7
8 import torch
9 import torch.nn as nn
10 from torch.optim import Optimizer
11
12 from ignite.engine import Engine, Events, EventsList, State
13 from ignite.engine.events import CallableEventWithFilter, RemovableEventHandle
14
15
16 class BaseHandler(metaclass=ABCMeta):
17 """Base handler for defining various useful handlers."""
18
19 @abstractmethod
20 def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:
21 pass
22
23
24 class BaseOptimizerParamsHandler(BaseHandler):
25 """
26 Base handler for logging optimizer parameters
27 """
28
29 def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
30 if not (
31 isinstance(optimizer, Optimizer)
32 or (hasattr(optimizer, "param_groups") and isinstance(optimizer.param_groups, Sequence))
33 ):
34 raise TypeError(
35 "Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, "
36 f"but given {type(optimizer)}"
37 )
38
39 self.optimizer = optimizer
40 self.param_name = param_name
41 self.tag = tag
42
43
44 class BaseOutputHandler(BaseHandler):
45 """
46 Helper handler to log engine's output and/or metrics
47 """
48
49 def __init__(
50 self,
51 tag: str,
52 metric_names: Optional[Union[str, List[str]]] = None,
53 output_transform: Optional[Callable] = None,
54 global_step_transform: Optional[Callable] = None,
55 state_attributes: Optional[List[str]] = None,
56 ):
57
58 if metric_names is not None:
59 if not (isinstance(metric_names, list) or (isinstance(metric_names, str) and metric_names == "all")):
60 raise TypeError(
61 f"metric_names should be either a list or equal 'all', got {type(metric_names)} instead."
62 )
63
64 if output_transform is not None and not callable(output_transform):
65 raise TypeError(f"output_transform should be a function, got {type(output_transform)} instead.")
66
67 if output_transform is None and metric_names is None and state_attributes is None:
68 raise ValueError("Either metric_names, output_transform or state_attributes should be defined")
69
70 if global_step_transform is not None and not callable(global_step_transform):
71 raise TypeError(f"global_step_transform should be a function, got {type(global_step_transform)} instead.")
72
73 if global_step_transform is None:
74
75 def global_step_transform(engine: Engine, event_name: Union[str, Events]) -> int:
76 return engine.state.get_event_attrib_value(event_name)
77
78 self.tag = tag
79 self.metric_names = metric_names
80 self.output_transform = output_transform
81 self.global_step_transform = global_step_transform
82 self.state_attributes = state_attributes
83
84 def _setup_output_metrics_state_attrs(
85 self, engine: Engine, log_text: Optional[bool] = False, key_tuple: Optional[bool] = True
86 ) -> Dict[Any, Any]:
87 """Helper method to setup metrics and state attributes to log"""
88 metrics_state_attrs = OrderedDict()
89 if self.metric_names is not None:
90 if isinstance(self.metric_names, str) and self.metric_names == "all":
91 metrics_state_attrs = OrderedDict(engine.state.metrics)
92 else:
93 for name in self.metric_names:
94 if name not in engine.state.metrics:
95 warnings.warn(
96 f"Provided metric name '{name}' is missing "
97 f"in engine's state metrics: {list(engine.state.metrics.keys())}"
98 )
99 continue
100 metrics_state_attrs[name] = engine.state.metrics[name]
101
102 if self.output_transform is not None:
103 output_dict = self.output_transform(engine.state.output)
104
105 if not isinstance(output_dict, dict):
106 output_dict = {"output": output_dict}
107
108 metrics_state_attrs.update({name: value for name, value in output_dict.items()})
109
110 if self.state_attributes is not None:
111 metrics_state_attrs.update({name: getattr(engine.state, name, None) for name in self.state_attributes})
112
113 metrics_state_attrs_dict = OrderedDict() # type: Dict[Any, Union[str, float, numbers.Number]]
114
115 def key_tuple_tf(tag: str, name: str, *args: str) -> Tuple[str, ...]:
116 return (tag, name) + args
117
118 def key_str_tf(tag: str, name: str, *args: str) -> str:
119 return "/".join((tag, name) + args)
120
121 key_tf = key_tuple_tf if key_tuple else key_str_tf
122
123 for name, value in metrics_state_attrs.items():
124 if isinstance(value, numbers.Number):
125 metrics_state_attrs_dict[key_tf(self.tag, name)] = value
126 elif isinstance(value, torch.Tensor) and value.ndimension() == 0:
127 metrics_state_attrs_dict[key_tf(self.tag, name)] = value.item()
128 elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
129 for i, v in enumerate(value):
130 metrics_state_attrs_dict[key_tf(self.tag, name, str(i))] = v.item()
131 else:
132 if isinstance(value, str) and log_text:
133 metrics_state_attrs_dict[key_tf(self.tag, name)] = value
134 else:
135 warnings.warn(f"Logger output_handler can not log metrics value type {type(value)}")
136 return metrics_state_attrs_dict
137
138
139 class BaseWeightsScalarHandler(BaseHandler):
140 """
141 Helper handler to log model's weights as scalars.
142 """
143
144 def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
145 if not isinstance(model, torch.nn.Module):
146 raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}")
147
148 if not callable(reduction):
149 raise TypeError(f"Argument reduction should be callable, but given {type(reduction)}")
150
151 def _is_0D_tensor(t: torch.Tensor) -> bool:
152 return isinstance(t, torch.Tensor) and t.ndimension() == 0
153
154 # Test reduction function on a tensor
155 o = reduction(torch.ones(4, 2))
156 if not (isinstance(o, numbers.Number) or _is_0D_tensor(o)):
157 raise TypeError(f"Output of the reduction function should be a scalar, but got {type(o)}")
158
159 self.model = model
160 self.reduction = reduction
161 self.tag = tag
162
163
164 class BaseWeightsHistHandler(BaseHandler):
165 """
166 Helper handler to log model's weights as histograms.
167 """
168
169 def __init__(self, model: nn.Module, tag: Optional[str] = None):
170 if not isinstance(model, torch.nn.Module):
171 raise TypeError(f"Argument model should be of type torch.nn.Module, but given {type(model)}")
172
173 self.model = model
174 self.tag = tag
175
176
177 class BaseLogger(metaclass=ABCMeta):
178 """
179 Base logger handler. See implementations: TensorboardLogger, VisdomLogger, PolyaxonLogger, MLflowLogger, ...
180
181 """
182
183 def attach(
184 self,
185 engine: Engine,
186 log_handler: Callable,
187 event_name: Union[str, Events, CallableEventWithFilter, EventsList],
188 *args: Any,
189 **kwargs: Any,
190 ) -> RemovableEventHandle:
191 """Attach the logger to the engine and execute `log_handler` function at `event_name` events.
192
193 Args:
194 engine: engine object.
195 log_handler: a logging handler to execute
196 event_name: event to attach the logging handler to. Valid events are from
197 :class:`~ignite.engine.events.Events` or :class:`~ignite.engine.events.EventsList` or any `event_name`
198 added by :meth:`~ignite.engine.engine.Engine.register_events`.
199 args: args forwarded to the `log_handler` method
200 kwargs: kwargs forwarded to the `log_handler` method
201
202 Returns:
203 :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
204 """
205 if isinstance(event_name, EventsList):
206 for name in event_name:
207 if name not in State.event_to_attr:
208 raise RuntimeError(f"Unknown event name '{name}'")
209 engine.add_event_handler(name, log_handler, self, name)
210
211 return RemovableEventHandle(event_name, log_handler, engine)
212
213 else:
214
215 if event_name not in State.event_to_attr:
216 raise RuntimeError(f"Unknown event name '{event_name}'")
217
218 return engine.add_event_handler(event_name, log_handler, self, event_name, *args, **kwargs)
219
220 def attach_output_handler(self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any) -> RemovableEventHandle:
221 """Shortcut method to attach `OutputHandler` to the logger.
222
223 Args:
224 engine: engine object.
225 event_name: event to attach the logging handler to. Valid events are from
226 :class:`~ignite.engine.events.Events` or any `event_name` added by
227 :meth:`~ignite.engine.engine.Engine.register_events`.
228 args: args to initialize `OutputHandler`
229 kwargs: kwargs to initialize `OutputHandler`
230
231 Returns:
232 :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
233 """
234 return self.attach(engine, self._create_output_handler(*args, **kwargs), event_name=event_name)
235
236 def attach_opt_params_handler(
237 self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any
238 ) -> RemovableEventHandle:
239 """Shortcut method to attach `OptimizerParamsHandler` to the logger.
240
241 Args:
242 engine: engine object.
243 event_name: event to attach the logging handler to. Valid events are from
244 :class:`~ignite.engine.events.Events` or any `event_name` added by
245 :meth:`~ignite.engine.engine.Engine.register_events`.
246 args: args to initialize `OptimizerParamsHandler`
247 kwargs: kwargs to initialize `OptimizerParamsHandler`
248
249 Returns:
250 :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.
251
252 .. versionchanged:: 0.4.3
253 Added missing return statement.
254 """
255 return self.attach(engine, self._create_opt_params_handler(*args, **kwargs), event_name=event_name)
256
257 @abstractmethod
258 def _create_output_handler(self, engine: Engine, *args: Any, **kwargs: Any) -> Callable:
259 pass
260
261 @abstractmethod
262 def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> Callable:
263 pass
264
265 def __enter__(self) -> "BaseLogger":
266 return self
267
268 def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
269 self.close()
270
271 def close(self) -> None:
272 pass
273
[end of ignite/contrib/handlers/base_logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py
--- a/ignite/contrib/handlers/base_logger.py
+++ b/ignite/contrib/handlers/base_logger.py
@@ -105,7 +105,7 @@
if not isinstance(output_dict, dict):
output_dict = {"output": output_dict}
- metrics_state_attrs.update({name: value for name, value in output_dict.items()})
+ metrics_state_attrs.update(output_dict)
if self.state_attributes is not None:
metrics_state_attrs.update({name: getattr(engine.state, name, None) for name in self.state_attributes})
|
{"golden_diff": "diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py\n--- a/ignite/contrib/handlers/base_logger.py\n+++ b/ignite/contrib/handlers/base_logger.py\n@@ -105,7 +105,7 @@\n if not isinstance(output_dict, dict):\n output_dict = {\"output\": output_dict}\n \n- metrics_state_attrs.update({name: value for name, value in output_dict.items()})\n+ metrics_state_attrs.update(output_dict)\n \n if self.state_attributes is not None:\n metrics_state_attrs.update({name: getattr(engine.state, name, None) for name in self.state_attributes})\n", "issue": "Seemingly unnecessary dictionary comprehension in contrib's BaseOutputHandler\nHi,\r\n`output_dict` is made sure to be a dictionary just above the line below. \r\n\r\nhttps://github.com/pytorch/ignite/blob/21a2ccc25dd7ff0aec5f1538387597cb093fe36e/ignite/contrib/handlers/base_logger.py#L108\r\n\r\nSo this line could be replaced with:\r\n\r\n```python\r\nmetrics_state_attrs.update(output_dict) \r\n```\nSeemingly unnecessary dictionary comprehension in contrib's BaseOutputHandler\nHi,\r\n`output_dict` is made sure to be a dictionary just above the line below. \r\n\r\nhttps://github.com/pytorch/ignite/blob/21a2ccc25dd7ff0aec5f1538387597cb093fe36e/ignite/contrib/handlers/base_logger.py#L108\r\n\r\nSo this line could be replaced with:\r\n\r\n```python\r\nmetrics_state_attrs.update(output_dict) \r\n```\n", "before_files": [{"content": "\"\"\"Base logger and its helper handlers.\"\"\"\nimport numbers\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\n\nfrom ignite.engine import Engine, Events, EventsList, State\nfrom ignite.engine.events import CallableEventWithFilter, RemovableEventHandle\n\n\nclass BaseHandler(metaclass=ABCMeta):\n \"\"\"Base handler for defining various useful handlers.\"\"\"\n\n @abstractmethod\n def __call__(self, engine: Engine, logger: Any, event_name: Union[str, Events]) -> None:\n pass\n\n\nclass BaseOptimizerParamsHandler(BaseHandler):\n \"\"\"\n Base handler for logging optimizer parameters\n \"\"\"\n\n def __init__(self, optimizer: Optimizer, param_name: str = \"lr\", tag: Optional[str] = None):\n if not (\n isinstance(optimizer, Optimizer)\n or (hasattr(optimizer, \"param_groups\") and isinstance(optimizer.param_groups, Sequence))\n ):\n raise TypeError(\n \"Argument optimizer should be torch.optim.Optimizer or has attribute 'param_groups' as list/tuple, \"\n f\"but given {type(optimizer)}\"\n )\n\n self.optimizer = optimizer\n self.param_name = param_name\n self.tag = tag\n\n\nclass BaseOutputHandler(BaseHandler):\n \"\"\"\n Helper handler to log engine's output and/or metrics\n \"\"\"\n\n def __init__(\n self,\n tag: str,\n metric_names: Optional[Union[str, List[str]]] = None,\n output_transform: Optional[Callable] = None,\n global_step_transform: Optional[Callable] = None,\n state_attributes: Optional[List[str]] = None,\n ):\n\n if metric_names is not None:\n if not (isinstance(metric_names, list) or (isinstance(metric_names, str) and metric_names == \"all\")):\n raise TypeError(\n f\"metric_names should be either a list or equal 'all', got {type(metric_names)} instead.\"\n )\n\n if output_transform is not None and not callable(output_transform):\n raise TypeError(f\"output_transform should be a function, got {type(output_transform)} instead.\")\n\n if output_transform is None and 
metric_names is None and state_attributes is None:\n raise ValueError(\"Either metric_names, output_transform or state_attributes should be defined\")\n\n if global_step_transform is not None and not callable(global_step_transform):\n raise TypeError(f\"global_step_transform should be a function, got {type(global_step_transform)} instead.\")\n\n if global_step_transform is None:\n\n def global_step_transform(engine: Engine, event_name: Union[str, Events]) -> int:\n return engine.state.get_event_attrib_value(event_name)\n\n self.tag = tag\n self.metric_names = metric_names\n self.output_transform = output_transform\n self.global_step_transform = global_step_transform\n self.state_attributes = state_attributes\n\n def _setup_output_metrics_state_attrs(\n self, engine: Engine, log_text: Optional[bool] = False, key_tuple: Optional[bool] = True\n ) -> Dict[Any, Any]:\n \"\"\"Helper method to setup metrics and state attributes to log\"\"\"\n metrics_state_attrs = OrderedDict()\n if self.metric_names is not None:\n if isinstance(self.metric_names, str) and self.metric_names == \"all\":\n metrics_state_attrs = OrderedDict(engine.state.metrics)\n else:\n for name in self.metric_names:\n if name not in engine.state.metrics:\n warnings.warn(\n f\"Provided metric name '{name}' is missing \"\n f\"in engine's state metrics: {list(engine.state.metrics.keys())}\"\n )\n continue\n metrics_state_attrs[name] = engine.state.metrics[name]\n\n if self.output_transform is not None:\n output_dict = self.output_transform(engine.state.output)\n\n if not isinstance(output_dict, dict):\n output_dict = {\"output\": output_dict}\n\n metrics_state_attrs.update({name: value for name, value in output_dict.items()})\n\n if self.state_attributes is not None:\n metrics_state_attrs.update({name: getattr(engine.state, name, None) for name in self.state_attributes})\n\n metrics_state_attrs_dict = OrderedDict() # type: Dict[Any, Union[str, float, numbers.Number]]\n\n def key_tuple_tf(tag: str, name: str, *args: str) -> Tuple[str, ...]:\n return (tag, name) + args\n\n def key_str_tf(tag: str, name: str, *args: str) -> str:\n return \"/\".join((tag, name) + args)\n\n key_tf = key_tuple_tf if key_tuple else key_str_tf\n\n for name, value in metrics_state_attrs.items():\n if isinstance(value, numbers.Number):\n metrics_state_attrs_dict[key_tf(self.tag, name)] = value\n elif isinstance(value, torch.Tensor) and value.ndimension() == 0:\n metrics_state_attrs_dict[key_tf(self.tag, name)] = value.item()\n elif isinstance(value, torch.Tensor) and value.ndimension() == 1:\n for i, v in enumerate(value):\n metrics_state_attrs_dict[key_tf(self.tag, name, str(i))] = v.item()\n else:\n if isinstance(value, str) and log_text:\n metrics_state_attrs_dict[key_tf(self.tag, name)] = value\n else:\n warnings.warn(f\"Logger output_handler can not log metrics value type {type(value)}\")\n return metrics_state_attrs_dict\n\n\nclass BaseWeightsScalarHandler(BaseHandler):\n \"\"\"\n Helper handler to log model's weights as scalars.\n \"\"\"\n\n def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):\n if not isinstance(model, torch.nn.Module):\n raise TypeError(f\"Argument model should be of type torch.nn.Module, but given {type(model)}\")\n\n if not callable(reduction):\n raise TypeError(f\"Argument reduction should be callable, but given {type(reduction)}\")\n\n def _is_0D_tensor(t: torch.Tensor) -> bool:\n return isinstance(t, torch.Tensor) and t.ndimension() == 0\n\n # Test reduction function on a tensor\n o 
= reduction(torch.ones(4, 2))\n if not (isinstance(o, numbers.Number) or _is_0D_tensor(o)):\n raise TypeError(f\"Output of the reduction function should be a scalar, but got {type(o)}\")\n\n self.model = model\n self.reduction = reduction\n self.tag = tag\n\n\nclass BaseWeightsHistHandler(BaseHandler):\n \"\"\"\n Helper handler to log model's weights as histograms.\n \"\"\"\n\n def __init__(self, model: nn.Module, tag: Optional[str] = None):\n if not isinstance(model, torch.nn.Module):\n raise TypeError(f\"Argument model should be of type torch.nn.Module, but given {type(model)}\")\n\n self.model = model\n self.tag = tag\n\n\nclass BaseLogger(metaclass=ABCMeta):\n \"\"\"\n Base logger handler. See implementations: TensorboardLogger, VisdomLogger, PolyaxonLogger, MLflowLogger, ...\n\n \"\"\"\n\n def attach(\n self,\n engine: Engine,\n log_handler: Callable,\n event_name: Union[str, Events, CallableEventWithFilter, EventsList],\n *args: Any,\n **kwargs: Any,\n ) -> RemovableEventHandle:\n \"\"\"Attach the logger to the engine and execute `log_handler` function at `event_name` events.\n\n Args:\n engine: engine object.\n log_handler: a logging handler to execute\n event_name: event to attach the logging handler to. Valid events are from\n :class:`~ignite.engine.events.Events` or :class:`~ignite.engine.events.EventsList` or any `event_name`\n added by :meth:`~ignite.engine.engine.Engine.register_events`.\n args: args forwarded to the `log_handler` method\n kwargs: kwargs forwarded to the `log_handler` method\n\n Returns:\n :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.\n \"\"\"\n if isinstance(event_name, EventsList):\n for name in event_name:\n if name not in State.event_to_attr:\n raise RuntimeError(f\"Unknown event name '{name}'\")\n engine.add_event_handler(name, log_handler, self, name)\n\n return RemovableEventHandle(event_name, log_handler, engine)\n\n else:\n\n if event_name not in State.event_to_attr:\n raise RuntimeError(f\"Unknown event name '{event_name}'\")\n\n return engine.add_event_handler(event_name, log_handler, self, event_name, *args, **kwargs)\n\n def attach_output_handler(self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any) -> RemovableEventHandle:\n \"\"\"Shortcut method to attach `OutputHandler` to the logger.\n\n Args:\n engine: engine object.\n event_name: event to attach the logging handler to. Valid events are from\n :class:`~ignite.engine.events.Events` or any `event_name` added by\n :meth:`~ignite.engine.engine.Engine.register_events`.\n args: args to initialize `OutputHandler`\n kwargs: kwargs to initialize `OutputHandler`\n\n Returns:\n :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.\n \"\"\"\n return self.attach(engine, self._create_output_handler(*args, **kwargs), event_name=event_name)\n\n def attach_opt_params_handler(\n self, engine: Engine, event_name: Any, *args: Any, **kwargs: Any\n ) -> RemovableEventHandle:\n \"\"\"Shortcut method to attach `OptimizerParamsHandler` to the logger.\n\n Args:\n engine: engine object.\n event_name: event to attach the logging handler to. Valid events are from\n :class:`~ignite.engine.events.Events` or any `event_name` added by\n :meth:`~ignite.engine.engine.Engine.register_events`.\n args: args to initialize `OptimizerParamsHandler`\n kwargs: kwargs to initialize `OptimizerParamsHandler`\n\n Returns:\n :class:`~ignite.engine.events.RemovableEventHandle`, which can be used to remove the handler.\n\n .. 
versionchanged:: 0.4.3\n Added missing return statement.\n \"\"\"\n return self.attach(engine, self._create_opt_params_handler(*args, **kwargs), event_name=event_name)\n\n @abstractmethod\n def _create_output_handler(self, engine: Engine, *args: Any, **kwargs: Any) -> Callable:\n pass\n\n @abstractmethod\n def _create_opt_params_handler(self, *args: Any, **kwargs: Any) -> Callable:\n pass\n\n def __enter__(self) -> \"BaseLogger\":\n return self\n\n def __exit__(self, type: Any, value: Any, traceback: Any) -> None:\n self.close()\n\n def close(self) -> None:\n pass\n", "path": "ignite/contrib/handlers/base_logger.py"}]}
| 3,845 | 147 |
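The simplification is behaviour-preserving because `dict.update` already accepts any mapping and copies every key/value pair; a tiny check with assumed metric values:

```python
from collections import OrderedDict

output_dict = {"loss": 0.25, "accuracy": 0.91}  # assumed example output

via_comprehension = OrderedDict(epoch=3)
via_comprehension.update({name: value for name, value in output_dict.items()})

via_plain_update = OrderedDict(epoch=3)
via_plain_update.update(output_dict)

assert via_comprehension == via_plain_update
print(via_plain_update)
# OrderedDict([('epoch', 3), ('loss', 0.25), ('accuracy', 0.91)])
```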
gh_patches_debug_6913
|
rasdani/github-patches
|
git_diff
|
horovod__horovod-214
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error on using hvd.mpi_threads_supported() error: undefined symbol: mpi_threads_supported
Error on using hvd.mpi_threads_supported()
error: undefined symbol: mpi_threads_supported
`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()`
should be
`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()`
</issue>
<code>
[start of horovod/common/__init__.py]
1 # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 # Modifications copyright (C) 2018 Uber Technologies, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 # =============================================================================
16
17 import ctypes
18 import os
19 import sysconfig
20
21
22 def get_ext_suffix():
23 """Determine library extension for various versions of Python."""
24 ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
25 if ext_suffix:
26 return ext_suffix
27
28 ext_suffix = sysconfig.get_config_var('SO')
29 if ext_suffix:
30 return ext_suffix
31
32 return '.so'
33
34
35 MPI_COMMON_LIB_CTYPES = \
36 ctypes.CDLL(os.path.join(os.path.dirname(__file__),
37 'mpi_lib' + get_ext_suffix()), mode=ctypes.RTLD_GLOBAL)
38
39
40 def init():
41 """A function that initializes Horovod.
42 """
43 return MPI_COMMON_LIB_CTYPES.horovod_init()
44
45
46 def size():
47 """A function that returns the number of Horovod processes.
48
49 Returns:
50 An integer scalar containing the number of Horovod processes.
51 """
52 size = MPI_COMMON_LIB_CTYPES.horovod_size()
53 if size == -1:
54 raise ValueError(
55 'Horovod has not been initialized; use hvd.init().')
56 return size
57
58
59 def local_size():
60 """A function that returns the number of Horovod processes within the
61 node the current process is running on.
62
63 Returns:
64 An integer scalar containing the number of local Horovod processes.
65 """
66 local_size = MPI_COMMON_LIB_CTYPES.horovod_local_size()
67 if local_size == -1:
68 raise ValueError(
69 'Horovod has not been initialized; use hvd.init().')
70 return local_size
71
72
73 def rank():
74 """A function that returns the Horovod rank of the calling process.
75
76 Returns:
77 An integer scalar with the Horovod rank of the calling process.
78 """
79 rank = MPI_COMMON_LIB_CTYPES.horovod_rank()
80 if rank == -1:
81 raise ValueError(
82 'Horovod has not been initialized; use hvd.init().')
83 return rank
84
85
86 def local_rank():
87 """A function that returns the local Horovod rank of the calling process, within the
88 node that it is running on. For example, if there are seven processes running
89 on a node, their local ranks will be zero through six, inclusive.
90
91 Returns:
92 An integer scalar with the local Horovod rank of the calling process.
93 """
94 local_rank = MPI_COMMON_LIB_CTYPES.horovod_local_rank()
95 if local_rank == -1:
96 raise ValueError(
97 'Horovod has not been initialized; use hvd.init().')
98 return local_rank
99
100
101 def mpi_threads_supported():
102 """A function that returns a flag indicating whether MPI multi-threading is supported.
103
104 If MPI multi-threading is supported, users may mix and match Horovod usage with other
105 MPI libraries, such as `mpi4py`.
106
107 Returns:
108 A boolean value indicating whether MPI multi-threading is supported.
109 """
110 mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()
111 if mpi_threads_supported == -1:
112 raise ValueError(
113 'Horovod has not been initialized; use hvd.init().')
114 return bool(mpi_threads_supported)
115
[end of horovod/common/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/horovod/common/__init__.py b/horovod/common/__init__.py
--- a/horovod/common/__init__.py
+++ b/horovod/common/__init__.py
@@ -107,7 +107,7 @@
Returns:
A boolean value indicating whether MPI multi-threading is supported.
"""
- mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()
+ mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()
if mpi_threads_supported == -1:
raise ValueError(
'Horovod has not been initialized; use hvd.init().')
|
{"golden_diff": "diff --git a/horovod/common/__init__.py b/horovod/common/__init__.py\n--- a/horovod/common/__init__.py\n+++ b/horovod/common/__init__.py\n@@ -107,7 +107,7 @@\n Returns:\n A boolean value indicating whether MPI multi-threading is supported.\n \"\"\"\n- mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()\n+ mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()\n if mpi_threads_supported == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n", "issue": "Error on using hvd.mpi_threads_supported() error: undefined symbol: mpi_threads_supported\nError on using hvd.mpi_threads_supported() \r\n\r\nerror: undefined symbol: mpi_threads_supported\r\n\r\n`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()`\r\n should be \r\n`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()`\r\n\n", "before_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications copyright (C) 2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport ctypes\nimport os\nimport sysconfig\n\n\ndef get_ext_suffix():\n \"\"\"Determine library extension for various versions of Python.\"\"\"\n ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')\n if ext_suffix:\n return ext_suffix\n\n ext_suffix = sysconfig.get_config_var('SO')\n if ext_suffix:\n return ext_suffix\n\n return '.so'\n\n\nMPI_COMMON_LIB_CTYPES = \\\n ctypes.CDLL(os.path.join(os.path.dirname(__file__),\n 'mpi_lib' + get_ext_suffix()), mode=ctypes.RTLD_GLOBAL)\n\n\ndef init():\n \"\"\"A function that initializes Horovod.\n \"\"\"\n return MPI_COMMON_LIB_CTYPES.horovod_init()\n\n\ndef size():\n \"\"\"A function that returns the number of Horovod processes.\n\n Returns:\n An integer scalar containing the number of Horovod processes.\n \"\"\"\n size = MPI_COMMON_LIB_CTYPES.horovod_size()\n if size == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return size\n\n\ndef local_size():\n \"\"\"A function that returns the number of Horovod processes within the\n node the current process is running on.\n\n Returns:\n An integer scalar containing the number of local Horovod processes.\n \"\"\"\n local_size = MPI_COMMON_LIB_CTYPES.horovod_local_size()\n if local_size == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return local_size\n\n\ndef rank():\n \"\"\"A function that returns the Horovod rank of the calling process.\n\n Returns:\n An integer scalar with the Horovod rank of the calling process.\n \"\"\"\n rank = MPI_COMMON_LIB_CTYPES.horovod_rank()\n if rank == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return rank\n\n\ndef local_rank():\n \"\"\"A function that returns the local Horovod rank of the calling process, within the\n node that it is running on. 
For example, if there are seven processes running\n on a node, their local ranks will be zero through six, inclusive.\n\n Returns:\n An integer scalar with the local Horovod rank of the calling process.\n \"\"\"\n local_rank = MPI_COMMON_LIB_CTYPES.horovod_local_rank()\n if local_rank == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return local_rank\n\n\ndef mpi_threads_supported():\n \"\"\"A function that returns a flag indicating whether MPI multi-threading is supported.\n\n If MPI multi-threading is supported, users may mix and match Horovod usage with other\n MPI libraries, such as `mpi4py`.\n\n Returns:\n A boolean value indicating whether MPI multi-threading is supported.\n \"\"\"\n mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()\n if mpi_threads_supported == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return bool(mpi_threads_supported)\n", "path": "horovod/common/__init__.py"}]}
| 1,693 | 144 |
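The typo stays hidden until runtime because `ctypes.CDLL` resolves symbols lazily on first attribute access; only when `hvd.mpi_threads_supported()` is actually called does the lookup fail and surface the `undefined symbol` message. A small sketch of that lookup behaviour, using `libm` as a stand-in for Horovod's `mpi_lib` shared object (the real library exports the prefixed `horovod_mpi_threads_supported` symbol, which is what the patch calls):

```python
import ctypes
import ctypes.util

# find_library may return None on unusual setups; on a typical Linux box it
# resolves to something like "libm.so.6".
libm = ctypes.CDLL(ctypes.util.find_library("m"))

libm.cos  # the symbol exists, so the lazy lookup succeeds

try:
    libm.cosine  # no such export -> AttributeError at lookup time
except AttributeError as exc:
    # On Linux the message typically reads ".../libm.so.6: undefined symbol: cosine",
    # the same shape of error reported for mpi_threads_supported.
    print(exc)
```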
gh_patches_debug_32181
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-1281
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Segmentation fault when using tfa.rotate in tf-nightly
**System information**
- OS Platform: Linux Ubuntu 18.04.3 LTS
- TensorFlow version: tf-nightly (2.2.0-dev20200310) installed with pip
- TensorFlow-Addons version: 0.8.3 (pip) and tfa-nightly (0.9.0-dev; pip)
- Python version: 3.6.9
- Is GPU used?: yes
**Describe the bug**
I currently have to use tf-nightly because of a separate issue. When I use tfa.rotate I get a segmentation fault. Behavior is the same with tfa 0.8.3 and tfa-nightly.
**Code to reproduce the issue**
```
import tensorflow as tf
import tensorflow_addons as tfa
img = tf.io.read_file(IMG_PATH)
img = tf.image.decode_png(img)
print(img.shape)
img = tfa.image.rotate(img, 90)
```
Output:
> (128, 128, 3)
> Segmentation fault (core dumped)
I also tried `interpolation="BILINEAR"`. Problem is the same.
</issue>
<code>
[start of tensorflow_addons/utils/ensure_tf_install.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16
17 # Ensure TensorFlow is importable and its version is sufficiently recent. This
18 # needs to happen before anything else, since the imports below will try to
19 # import tensorflow, too.
20 def _ensure_tf_install():
21 """Attempt to import tensorflow, and ensure its version is sufficient.
22 Raises:
23 ImportError: if either tensorflow is not importable or its version is
24 inadequate.
25 """
26 import tensorflow as tf
27 import distutils.version
28
29 #
30 # Update this whenever we need to depend on a newer TensorFlow release.
31 #
32 required_tensorflow_version = "2.1.0"
33
34 if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(
35 required_tensorflow_version
36 ):
37 raise ImportError(
38 "This version of TensorFlow Addons requires TensorFlow "
39 "version >= {required}; Detected an installation of version "
40 "{present}. Please upgrade TensorFlow to proceed.".format(
41 required=required_tensorflow_version, present=tf.__version__
42 )
43 )
44
[end of tensorflow_addons/utils/ensure_tf_install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tensorflow_addons/utils/ensure_tf_install.py b/tensorflow_addons/utils/ensure_tf_install.py
--- a/tensorflow_addons/utils/ensure_tf_install.py
+++ b/tensorflow_addons/utils/ensure_tf_install.py
@@ -17,27 +17,40 @@
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
+
+from distutils.version import LooseVersion
+import warnings
+
+import tensorflow as tf
+
+
+warning_template = """
+This version of TensorFlow Addons requires TensorFlow {required}.
+Detected an installation of version {present}.
+
+While some functions might work, TensorFlow Addons was not tested
+with this TensorFlow version. Also custom ops were not compiled
+against this version of TensorFlow. If you use custom ops,
+you might get errors (segmentation faults for example).
+
+It might help you to fallback to pure Python ops with
+TF_ADDONS_PY_OPS . To do that, see
+https://github.com/tensorflow/addons#gpucpu-custom-ops
+
+If you encounter errors, do *not* file bugs in GitHub because
+the version of TensorFlow you are using is not supported.
+"""
+
+
def _ensure_tf_install():
- """Attempt to import tensorflow, and ensure its version is sufficient.
- Raises:
- ImportError: if either tensorflow is not importable or its version is
- inadequate.
+ """Warn the user if the version of TensorFlow used is not supported.
"""
- import tensorflow as tf
- import distutils.version
- #
# Update this whenever we need to depend on a newer TensorFlow release.
- #
- required_tensorflow_version = "2.1.0"
-
- if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(
- required_tensorflow_version
- ):
- raise ImportError(
- "This version of TensorFlow Addons requires TensorFlow "
- "version >= {required}; Detected an installation of version "
- "{present}. Please upgrade TensorFlow to proceed.".format(
- required=required_tensorflow_version, present=tf.__version__
- )
+ required_tf_version = "2.1.0"
+
+ if LooseVersion(tf.__version__) != LooseVersion(required_tf_version):
+ message = warning_template.format(
+ required=required_tf_version, present=tf.__version__
)
+ warnings.warn(message, UserWarning)
|
{"golden_diff": "diff --git a/tensorflow_addons/utils/ensure_tf_install.py b/tensorflow_addons/utils/ensure_tf_install.py\n--- a/tensorflow_addons/utils/ensure_tf_install.py\n+++ b/tensorflow_addons/utils/ensure_tf_install.py\n@@ -17,27 +17,40 @@\n # Ensure TensorFlow is importable and its version is sufficiently recent. This\n # needs to happen before anything else, since the imports below will try to\n # import tensorflow, too.\n+\n+from distutils.version import LooseVersion\n+import warnings\n+\n+import tensorflow as tf\n+\n+\n+warning_template = \"\"\"\n+This version of TensorFlow Addons requires TensorFlow {required}.\n+Detected an installation of version {present}.\n+\n+While some functions might work, TensorFlow Addons was not tested\n+with this TensorFlow version. Also custom ops were not compiled\n+against this version of TensorFlow. If you use custom ops,\n+you might get errors (segmentation faults for example).\n+\n+It might help you to fallback to pure Python ops with\n+TF_ADDONS_PY_OPS . To do that, see\n+https://github.com/tensorflow/addons#gpucpu-custom-ops\n+\n+If you encounter errors, do *not* file bugs in GitHub because\n+the version of TensorFlow you are using is not supported.\n+\"\"\"\n+\n+\n def _ensure_tf_install():\n- \"\"\"Attempt to import tensorflow, and ensure its version is sufficient.\n- Raises:\n- ImportError: if either tensorflow is not importable or its version is\n- inadequate.\n+ \"\"\"Warn the user if the version of TensorFlow used is not supported.\n \"\"\"\n- import tensorflow as tf\n- import distutils.version\n \n- #\n # Update this whenever we need to depend on a newer TensorFlow release.\n- #\n- required_tensorflow_version = \"2.1.0\"\n-\n- if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(\n- required_tensorflow_version\n- ):\n- raise ImportError(\n- \"This version of TensorFlow Addons requires TensorFlow \"\n- \"version >= {required}; Detected an installation of version \"\n- \"{present}. Please upgrade TensorFlow to proceed.\".format(\n- required=required_tensorflow_version, present=tf.__version__\n- )\n+ required_tf_version = \"2.1.0\"\n+\n+ if LooseVersion(tf.__version__) != LooseVersion(required_tf_version):\n+ message = warning_template.format(\n+ required=required_tf_version, present=tf.__version__\n )\n+ warnings.warn(message, UserWarning)\n", "issue": "Segmentation fault when using tfa.rotate in tf-nightly\n**System information**\r\n- OS Platform: Linux Ubuntu 18.04.3 LTS\r\n- TensorFlow version: tf-nightly (2.2.0-dev20200310) installed with pip\r\n- TensorFlow-Addons version: 0.8.3 (pip) and tfa-nightly (0.9.0-dev; pip)\r\n- Python version: 3.6.9\r\n- Is GPU used?: yes\r\n\r\n**Describe the bug**\r\nI currently have to use tf-nightly because of a separate issue. When I use tfa.rotate I get a segmentation fault. Behavior is the same with tfa 0.8.3 and tfa-nightly.\r\n\r\n**Code to reproduce the issue**\r\n```\r\nimport tensorflow as tf\r\nimport tensorflow_addons as tfa\r\n\r\nimg = tf.io.read_file(IMG_PATH)\r\nimg = tf.image.decode_png(img)\r\nprint(img.shape)\r\nimg = tfa.image.rotate(img, 90)\r\n```\r\nOutput: \r\n> (128, 128, 3)\r\n> Segmentation fault (core dumped)\r\n\r\nI also tried `interpolation=\"BILINEAR\"`. Problem is the same. \n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n# Ensure TensorFlow is importable and its version is sufficiently recent. This\n# needs to happen before anything else, since the imports below will try to\n# import tensorflow, too.\ndef _ensure_tf_install():\n \"\"\"Attempt to import tensorflow, and ensure its version is sufficient.\n Raises:\n ImportError: if either tensorflow is not importable or its version is\n inadequate.\n \"\"\"\n import tensorflow as tf\n import distutils.version\n\n #\n # Update this whenever we need to depend on a newer TensorFlow release.\n #\n required_tensorflow_version = \"2.1.0\"\n\n if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(\n required_tensorflow_version\n ):\n raise ImportError(\n \"This version of TensorFlow Addons requires TensorFlow \"\n \"version >= {required}; Detected an installation of version \"\n \"{present}. Please upgrade TensorFlow to proceed.\".format(\n required=required_tensorflow_version, present=tf.__version__\n )\n )\n", "path": "tensorflow_addons/utils/ensure_tf_install.py"}]}
| 1,233 | 556 |
gh_patches_debug_11369
|
rasdani/github-patches
|
git_diff
|
apluslms__a-plus-1348
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use setUpTestData in tests
`setUp` is run before every test method, but `setUpTestData` is run only once for the whole test class. Each test method is run in a transaction and rolled back at the end, so there is no need to re-create the database data for each method separately. Most of the time in unit tests seems to be spent on creating data for the tests, so it would be a massive speed-up to reuse the data by using `setUpTestData`.
`setUpTestData` takes the class as an argument but it can be used similarly to `setUp`: just assign the properties to the class (e.g. `cls.instance = CourseInstance...`) and the property can be accessed normally through the object (e.g. `self.instance`).
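
A minimal sketch of the conversion (illustrative only; it reuses the `testUser` fixture from the `lib/testdata.py` listing below rather than the full course setup):

```python
from django.contrib.auth.models import User
from django.test import TestCase


class ExampleTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Runs once per class. Each test method still runs in its own
        # transaction that is rolled back, so the shared rows stay intact.
        cls.user = User.objects.create_user(
            username="testUser", password="testPassword")

    def test_user_is_available(self):
        # Class attributes are reachable through the instance as usual.
        self.assertEqual(self.user.username, "testUser")
```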
</issue>
<code>
[start of lib/testdata.py]
1 from datetime import timedelta
2 from django.conf import settings
3 from django.contrib.auth.models import User
4 from django.test import TestCase
5 from django.utils import timezone
6
7 from course.models import (
8 Course,
9 CourseInstance,
10 CourseModule,
11 LearningObjectCategory,
12 )
13 from exercise.models import (
14 BaseExercise,
15 StaticExercise,
16 Submission,
17 )
18
19
20 class CourseTestCase(TestCase):
21
22 def setUp(self):
23 self.setUpCourse()
24 self.setUpSubmissions()
25
26 def setUpCourse(self):
27
28 self.now = timezone.now()
29 self.tomorrow = self.now + timedelta(days=1)
30 self.two_days_after = self.now + timedelta(days=2)
31 self.three_days_after = self.now + timedelta(days=3)
32 self.yesterday = self.now - timedelta(days=1)
33 self.two_days_before = self.now - timedelta(days=2)
34
35 self.user = User(username='testUser')
36 self.user.set_password('testPassword')
37 self.user.save()
38
39 self.teacher = User(username='testTeacher')
40 self.teacher.set_password('testPassword')
41 self.teacher.save()
42
43 self.student = User(username='testStudent')
44 self.student.set_password('testPassword')
45 self.student.save()
46 self.student.userprofile.student_id = "123TEST"
47 self.student.userprofile.organization = settings.LOCAL_ORGANIZATION
48 self.student.userprofile.save()
49
50 self.course = Course.objects.create(
51 url="course",
52 name="Test Course",
53 code="123456",
54 )
55
56 self.instance = CourseInstance.objects.create(
57 course=self.course,
58 url="instance",
59 instance_name="2016",
60 starting_time=self.now,
61 ending_time=self.tomorrow,
62 )
63 self.instance.add_teacher(self.teacher.userprofile)
64 self.instance.enroll_student(self.student)
65
66 self.module = CourseModule.objects.create(
67 course_instance=self.instance,
68 url="module",
69 name="Test Module",
70 points_to_pass=10,
71 opening_time=self.now,
72 closing_time=self.tomorrow,
73 late_submissions_allowed=True,
74 late_submission_deadline=self.two_days_after,
75 late_submission_penalty=0.2
76 )
77 self.module2 = CourseModule.objects.create(
78 course_instance=self.instance,
79 url="module2",
80 name="Test Module 2",
81 points_to_pass=0,
82 opening_time=self.tomorrow,
83 closing_time=self.two_days_after,
84 )
85 self.module0 = CourseModule.objects.create(
86 course_instance=self.instance,
87 url="module0",
88 name="Past Module",
89 points_to_pass=10,
90 opening_time=self.two_days_before,
91 closing_time=self.yesterday,
92 )
93 self.category = LearningObjectCategory.objects.create(
94 course_instance=self.instance,
95 name="Test Category",
96 points_to_pass=5,
97 )
98
99 self.exercise = StaticExercise.objects.create(
100 course_module=self.module,
101 category=self.category,
102 url='e1',
103 name="Test Exercise",
104 exercise_page_content='$$exercise$$content',
105 submission_page_content='$$exercise$$received',
106 points_to_pass=0,
107 max_points=100,
108 order=1,
109 )
110 self.exercise2 = StaticExercise.objects.create(
111 course_module=self.module,
112 category=self.category,
113 url='e2',
114 name="Test Exercise 2",
115 exercise_page_content='$$exercise2$$content',
116 submission_page_content='$$exercise2$$received',
117 points_to_pass=10,
118 max_points=100,
119 order=2,
120 )
121 self.exercise3 = StaticExercise.objects.create(
122 course_module=self.module2,
123 category=self.category,
124 url='e3',
125 name="Test Exercise 3",
126 exercise_page_content='$$exercise3$$content',
127 submission_page_content='$$exercise3$$received',
128 points_to_pass=0,
129 max_points=100,
130 )
131 self.exercise0 = BaseExercise.objects.create(
132 course_module=self.module0,
133 category=self.category,
134 url='b0',
135 name="Base Exercise 0",
136 service_url="http://localhost/",
137 points_to_pass=0,
138 max_points=100,
139 min_group_size=1,
140 max_group_size=2,
141 )
142
143 def setUpSubmissions(self):
144
145 self.submission = Submission.objects.create(
146 exercise=self.exercise,
147 submission_data={'submission':1},
148 feedback='$$submission$$feedback',
149 )
150 self.submission.submitters.add(self.student.userprofile)
151 self.submission.set_points(1,2)
152 self.submission.set_ready()
153 self.submission.save()
154
155 self.submission2 = Submission.objects.create(
156 exercise=self.exercise,
157 submission_data={'submission':2},
158 )
159 self.submission2.submitters.add(self.student.userprofile)
160
161 self.submission3 = Submission.objects.create(
162 exercise=self.exercise2,
163 submission_data={'submission':3},
164 )
165 self.submission3.submitters.add(self.student.userprofile)
166 self.submission3.submitters.add(self.user.userprofile)
167
[end of lib/testdata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/testdata.py b/lib/testdata.py
--- a/lib/testdata.py
+++ b/lib/testdata.py
@@ -19,10 +19,12 @@
class CourseTestCase(TestCase):
- def setUp(self):
- self.setUpCourse()
- self.setUpSubmissions()
+ @classmethod
+ def setUpTestData(cls):
+ cls.setUpCourse()
+ cls.setUpSubmissions()
+ @classmethod
def setUpCourse(self):
self.now = timezone.now()
@@ -140,6 +142,7 @@
max_group_size=2,
)
+ @classmethod
def setUpSubmissions(self):
self.submission = Submission.objects.create(
|
{"golden_diff": "diff --git a/lib/testdata.py b/lib/testdata.py\n--- a/lib/testdata.py\n+++ b/lib/testdata.py\n@@ -19,10 +19,12 @@\n \n class CourseTestCase(TestCase):\n \n- def setUp(self):\n- self.setUpCourse()\n- self.setUpSubmissions()\n+ @classmethod\n+ def setUpTestData(cls):\n+ cls.setUpCourse()\n+ cls.setUpSubmissions()\n \n+ @classmethod\n def setUpCourse(self):\n \n self.now = timezone.now()\n@@ -140,6 +142,7 @@\n max_group_size=2,\n )\n \n+ @classmethod\n def setUpSubmissions(self):\n \n self.submission = Submission.objects.create(\n", "issue": "Use setUpTestData in tests \n`setUp` is ran before every test method but `setUpTestData` is run only once for the whole test class. Each test method is run in a transaction and rolled back at the end, so there is no need to re-create the database data for each method separately. Most of the time in unit tests seems to be spent on creating data for the tests, so it would be a massive speed up to reuse the data by using `setUpTestData`.\r\n\r\n`setUpTestData` takes the class as an argument but it can be used similarly to `setUp`: just assign the properties to the class (e.g. `cls.instance = CourseInstance...`) and the property can be accessed normally through the object (e.g. `self.instance`).\n", "before_files": [{"content": "from datetime import timedelta\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom course.models import (\n Course,\n CourseInstance,\n CourseModule,\n LearningObjectCategory,\n)\nfrom exercise.models import (\n BaseExercise,\n StaticExercise,\n Submission,\n)\n\n\nclass CourseTestCase(TestCase):\n\n def setUp(self):\n self.setUpCourse()\n self.setUpSubmissions()\n\n def setUpCourse(self):\n\n self.now = timezone.now()\n self.tomorrow = self.now + timedelta(days=1)\n self.two_days_after = self.now + timedelta(days=2)\n self.three_days_after = self.now + timedelta(days=3)\n self.yesterday = self.now - timedelta(days=1)\n self.two_days_before = self.now - timedelta(days=2)\n\n self.user = User(username='testUser')\n self.user.set_password('testPassword')\n self.user.save()\n\n self.teacher = User(username='testTeacher')\n self.teacher.set_password('testPassword')\n self.teacher.save()\n\n self.student = User(username='testStudent')\n self.student.set_password('testPassword')\n self.student.save()\n self.student.userprofile.student_id = \"123TEST\"\n self.student.userprofile.organization = settings.LOCAL_ORGANIZATION\n self.student.userprofile.save()\n\n self.course = Course.objects.create(\n url=\"course\",\n name=\"Test Course\",\n code=\"123456\",\n )\n\n self.instance = CourseInstance.objects.create(\n course=self.course,\n url=\"instance\",\n instance_name=\"2016\",\n starting_time=self.now,\n ending_time=self.tomorrow,\n )\n self.instance.add_teacher(self.teacher.userprofile)\n self.instance.enroll_student(self.student)\n\n self.module = CourseModule.objects.create(\n course_instance=self.instance,\n url=\"module\",\n name=\"Test Module\",\n points_to_pass=10,\n opening_time=self.now,\n closing_time=self.tomorrow,\n late_submissions_allowed=True,\n late_submission_deadline=self.two_days_after,\n late_submission_penalty=0.2\n )\n self.module2 = CourseModule.objects.create(\n course_instance=self.instance,\n url=\"module2\",\n name=\"Test Module 2\",\n points_to_pass=0,\n opening_time=self.tomorrow,\n closing_time=self.two_days_after,\n )\n self.module0 = CourseModule.objects.create(\n 
course_instance=self.instance,\n url=\"module0\",\n name=\"Past Module\",\n points_to_pass=10,\n opening_time=self.two_days_before,\n closing_time=self.yesterday,\n )\n self.category = LearningObjectCategory.objects.create(\n course_instance=self.instance,\n name=\"Test Category\",\n points_to_pass=5,\n )\n\n self.exercise = StaticExercise.objects.create(\n course_module=self.module,\n category=self.category,\n url='e1',\n name=\"Test Exercise\",\n exercise_page_content='$$exercise$$content',\n submission_page_content='$$exercise$$received',\n points_to_pass=0,\n max_points=100,\n order=1,\n )\n self.exercise2 = StaticExercise.objects.create(\n course_module=self.module,\n category=self.category,\n url='e2',\n name=\"Test Exercise 2\",\n exercise_page_content='$$exercise2$$content',\n submission_page_content='$$exercise2$$received',\n points_to_pass=10,\n max_points=100,\n order=2,\n )\n self.exercise3 = StaticExercise.objects.create(\n course_module=self.module2,\n category=self.category,\n url='e3',\n name=\"Test Exercise 3\",\n exercise_page_content='$$exercise3$$content',\n submission_page_content='$$exercise3$$received',\n points_to_pass=0,\n max_points=100,\n )\n self.exercise0 = BaseExercise.objects.create(\n course_module=self.module0,\n category=self.category,\n url='b0',\n name=\"Base Exercise 0\",\n service_url=\"http://localhost/\",\n points_to_pass=0,\n max_points=100,\n min_group_size=1,\n max_group_size=2,\n )\n\n def setUpSubmissions(self):\n\n self.submission = Submission.objects.create(\n exercise=self.exercise,\n submission_data={'submission':1},\n feedback='$$submission$$feedback',\n )\n self.submission.submitters.add(self.student.userprofile)\n self.submission.set_points(1,2)\n self.submission.set_ready()\n self.submission.save()\n\n self.submission2 = Submission.objects.create(\n exercise=self.exercise,\n submission_data={'submission':2},\n )\n self.submission2.submitters.add(self.student.userprofile)\n\n self.submission3 = Submission.objects.create(\n exercise=self.exercise2,\n submission_data={'submission':3},\n )\n self.submission3.submitters.add(self.student.userprofile)\n self.submission3.submitters.add(self.user.userprofile)\n", "path": "lib/testdata.py"}]}
| 2,161 | 159 |
gh_patches_debug_25730
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-2828
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Discussion] oauth2client credentials in a post-oauth2client world.
We recently switched `core` to use [`google-auth`](https://google-auth.readthedocs.io) in #2639 and completely removed support for `oauth2client`.
It is possible that some users are explicitly passing `oauth2client` credentials into `Client` constructors, e.g.:
```python
credentials = oauth2client.contrib.gce.AppAssertionCredentials()
storage_client = storage.Client(credentials=credentials)
```
After `google-cloud-core==0.22.0` is released this code will no longer work; only credentials from `google-auth` will work. There is a chance that we could break some user code, and there may be valid reasons for sticking with oauth2client in the near term (user credentials and storage, for example).
`google-auth` offers a way to [`convert`](https://github.com/GoogleCloudPlatform/google-auth-library-python/blob/443df66cb6e7960205c1d2108e67220d27408efe/google/auth/_oauth2client.py#L136) credentials:
```python
import google.auth._oauth2client
new_credentials = google.auth._oauth2client.convert(old_credentials)
```
We have two options:
1. Tell users who still want to use oauth2client to use `google.auth._oauth2client` to convert their credentials.
2. Add code in our `Client` base class to automatically convert credentials and issue a warning.
I strongly prefer (1) so that we do not prolong oauth2client's lifetime and so that we can more quickly free ourselves to switch http libraries.
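
For comparison, a rough sketch of what option (2) could look like — the helper name and warning wording here are illustrative assumptions, not an agreed design:

```python
import warnings

import google.auth.credentials
import google.auth._oauth2client


def _coerce_credentials(credentials):
    """Accept oauth2client credentials for now, but convert them and warn."""
    if credentials is None or isinstance(
            credentials, google.auth.credentials.Credentials):
        return credentials
    warnings.warn(
        'oauth2client credentials are deprecated; converting to google-auth. '
        'Please pass google-auth credentials instead.',
        DeprecationWarning)
    return google.auth._oauth2client.convert(credentials)
```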
</issue>
<code>
[start of core/google/cloud/client.py]
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Base classes for client used to interact with Google Cloud APIs."""
16
17 from google.oauth2 import service_account
18 import six
19
20 from google.cloud._helpers import _determine_default_project
21 from google.cloud._http import Connection
22 from google.cloud.credentials import get_credentials
23
24
25 class _ClientFactoryMixin(object):
26 """Mixin to allow factories that create credentials.
27
28 .. note::
29
30 This class is virtual.
31 """
32
33 @classmethod
34 def from_service_account_json(cls, json_credentials_path, *args, **kwargs):
35 """Factory to retrieve JSON credentials while creating client.
36
37 :type json_credentials_path: str
38 :param json_credentials_path: The path to a private key file (this file
39 was given to you when you created the
40 service account). This file must contain
41 a JSON object with a private key and
42 other credentials information (downloaded
43 from the Google APIs console).
44
45 :type args: tuple
46 :param args: Remaining positional arguments to pass to constructor.
47
48 :type kwargs: dict
49 :param kwargs: Remaining keyword arguments to pass to constructor.
50
51 :rtype: :class:`google.cloud.pubsub.client.Client`
52 :returns: The client created with the retrieved JSON credentials.
53 :raises: :class:`TypeError` if there is a conflict with the kwargs
54 and the credentials created by the factory.
55 """
56 if 'credentials' in kwargs:
57 raise TypeError('credentials must not be in keyword arguments')
58 credentials = service_account.Credentials.from_service_account_file(
59 json_credentials_path)
60 kwargs['credentials'] = credentials
61 return cls(*args, **kwargs)
62
63
64 class Client(_ClientFactoryMixin):
65 """Client to bundle configuration needed for API requests.
66
67 Assumes that the associated ``_connection_class`` only accepts
68 ``http`` and ``credentials`` in its constructor.
69
70 :type credentials: :class:`google.auth.credentials.Credentials` or
71 :class:`NoneType`
72 :param credentials: The OAuth2 Credentials to use for the connection
73 owned by this client. If not passed (and if no ``http``
74 object is passed), falls back to the default inferred
75 from the environment.
76
77 :type http: :class:`httplib2.Http` or class that defines ``request()``.
78 :param http: An optional HTTP object to make requests. If not passed, an
79 ``http`` object is created that is bound to the
80 ``credentials`` for the current object.
81 """
82
83 _connection_class = Connection
84
85 def __init__(self, credentials=None, http=None):
86 if credentials is None and http is None:
87 credentials = get_credentials()
88 self._connection = self._connection_class(
89 credentials=credentials, http=http)
90
91
92 class _ClientProjectMixin(object):
93 """Mixin to allow setting the project on the client.
94
95 :type project: str
96 :param project: the project which the client acts on behalf of. If not
97 passed falls back to the default inferred from the
98 environment.
99
100 :raises: :class:`EnvironmentError` if the project is neither passed in nor
101 set in the environment. :class:`ValueError` if the project value
102 is invalid.
103 """
104
105 def __init__(self, project=None):
106 project = self._determine_default(project)
107 if project is None:
108 raise EnvironmentError('Project was not passed and could not be '
109 'determined from the environment.')
110 if isinstance(project, six.binary_type):
111 project = project.decode('utf-8')
112 if not isinstance(project, six.string_types):
113 raise ValueError('Project must be a string.')
114 self.project = project
115
116 @staticmethod
117 def _determine_default(project):
118 """Helper: use default project detection."""
119 return _determine_default_project(project)
120
121
122 class JSONClient(Client, _ClientProjectMixin):
123 """Client for Google JSON-based API.
124
125 Assumes such APIs use the ``project`` and the client needs to store this
126 value.
127
128 :type project: str
129 :param project: the project which the client acts on behalf of. If not
130 passed falls back to the default inferred from the
131 environment.
132
133 :type credentials: :class:`google.auth.credentials.Credentials` or
134 :class:`NoneType`
135 :param credentials: The OAuth2 Credentials to use for the connection
136 owned by this client. If not passed (and if no ``http``
137 object is passed), falls back to the default inferred
138 from the environment.
139
140 :type http: :class:`httplib2.Http` or class that defines ``request()``.
141 :param http: An optional HTTP object to make requests. If not passed, an
142 ``http`` object is created that is bound to the
143 ``credentials`` for the current object.
144
145 :raises: :class:`ValueError` if the project is neither passed in nor
146 set in the environment.
147 """
148
149 def __init__(self, project=None, credentials=None, http=None):
150 _ClientProjectMixin.__init__(self, project=project)
151 Client.__init__(self, credentials=credentials, http=http)
152
[end of core/google/cloud/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py
--- a/core/google/cloud/client.py
+++ b/core/google/cloud/client.py
@@ -14,6 +14,7 @@
"""Base classes for client used to interact with Google Cloud APIs."""
+import google.auth.credentials
from google.oauth2 import service_account
import six
@@ -22,6 +23,13 @@
from google.cloud.credentials import get_credentials
+_GOOGLE_AUTH_CREDENTIALS_HELP = (
+ 'This library only supports credentials from google-auth-library-python. '
+ 'See https://google-cloud-python.readthedocs.io/en/latest/'
+ 'google-cloud-auth.html for help on authentication with this library.'
+)
+
+
class _ClientFactoryMixin(object):
"""Mixin to allow factories that create credentials.
@@ -83,6 +91,10 @@
_connection_class = Connection
def __init__(self, credentials=None, http=None):
+ if (credentials is not None and
+ not isinstance(
+ credentials, google.auth.credentials.Credentials)):
+ raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)
if credentials is None and http is None:
credentials = get_credentials()
self._connection = self._connection_class(
|
{"golden_diff": "diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py\n--- a/core/google/cloud/client.py\n+++ b/core/google/cloud/client.py\n@@ -14,6 +14,7 @@\n \n \"\"\"Base classes for client used to interact with Google Cloud APIs.\"\"\"\n \n+import google.auth.credentials\n from google.oauth2 import service_account\n import six\n \n@@ -22,6 +23,13 @@\n from google.cloud.credentials import get_credentials\n \n \n+_GOOGLE_AUTH_CREDENTIALS_HELP = (\n+ 'This library only supports credentials from google-auth-library-python. '\n+ 'See https://google-cloud-python.readthedocs.io/en/latest/'\n+ 'google-cloud-auth.html for help on authentication with this library.'\n+)\n+\n+\n class _ClientFactoryMixin(object):\n \"\"\"Mixin to allow factories that create credentials.\n \n@@ -83,6 +91,10 @@\n _connection_class = Connection\n \n def __init__(self, credentials=None, http=None):\n+ if (credentials is not None and\n+ not isinstance(\n+ credentials, google.auth.credentials.Credentials)):\n+ raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)\n if credentials is None and http is None:\n credentials = get_credentials()\n self._connection = self._connection_class(\n", "issue": "[Discussion] oauth2client credentials in a post-oauth2client world.\nWe recently switched `core` to use [`google-auth`](https://google-auth.readthedocs.io) in #2639 and completely removed support for `oauth2client`.\r\n\r\nIt is possible that some users are explicitly passing `oauth2client` credentials into `Client` constructors, e.g.:\r\n\r\n```python\r\ncredentials = oauth2client.contrib.gce.AppAppsertionCredentials()\r\nstorage_client = storage.Client(credentials=credentials)\r\n```\r\n\r\nAfter `google-cloud-core==0.22.0` is released this code will no longer work. Only credentials from `google-auth` will work. There is a potential that we could break some user code and there may be some valid reasons for sticking with oauth2client in the near-term (user credentials and storage, for example).\r\n\r\n`google-auth` offers a way to [`convert`](https://github.com/GoogleCloudPlatform/google-auth-library-python/blob/443df66cb6e7960205c1d2108e67220d27408efe/google/auth/_oauth2client.py#L136) credentials:\r\n\r\n```python\r\nimport google.auth._oauth2client\r\n\r\nnew_credentials = google.auth._oauth2client.convert(old_credentials)\r\n```\r\n\r\nWe have two options:\r\n\r\n1. Tell users who still want to use oauth2client to use `google.auth._oauth2client` to convert their credentials.\r\n2. 
Add code in our `Client` base class to automatically convert credentials and issue a warning.\r\n\r\nI strongly prefer (1) so that we do not prolong oauth2client's lifetime and so that we can more quickly free ourselves to switch http libraries.\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base classes for client used to interact with Google Cloud APIs.\"\"\"\n\nfrom google.oauth2 import service_account\nimport six\n\nfrom google.cloud._helpers import _determine_default_project\nfrom google.cloud._http import Connection\nfrom google.cloud.credentials import get_credentials\n\n\nclass _ClientFactoryMixin(object):\n \"\"\"Mixin to allow factories that create credentials.\n\n .. note::\n\n This class is virtual.\n \"\"\"\n\n @classmethod\n def from_service_account_json(cls, json_credentials_path, *args, **kwargs):\n \"\"\"Factory to retrieve JSON credentials while creating client.\n\n :type json_credentials_path: str\n :param json_credentials_path: The path to a private key file (this file\n was given to you when you created the\n service account). This file must contain\n a JSON object with a private key and\n other credentials information (downloaded\n from the Google APIs console).\n\n :type args: tuple\n :param args: Remaining positional arguments to pass to constructor.\n\n :type kwargs: dict\n :param kwargs: Remaining keyword arguments to pass to constructor.\n\n :rtype: :class:`google.cloud.pubsub.client.Client`\n :returns: The client created with the retrieved JSON credentials.\n :raises: :class:`TypeError` if there is a conflict with the kwargs\n and the credentials created by the factory.\n \"\"\"\n if 'credentials' in kwargs:\n raise TypeError('credentials must not be in keyword arguments')\n credentials = service_account.Credentials.from_service_account_file(\n json_credentials_path)\n kwargs['credentials'] = credentials\n return cls(*args, **kwargs)\n\n\nclass Client(_ClientFactoryMixin):\n \"\"\"Client to bundle configuration needed for API requests.\n\n Assumes that the associated ``_connection_class`` only accepts\n ``http`` and ``credentials`` in its constructor.\n\n :type credentials: :class:`google.auth.credentials.Credentials` or\n :class:`NoneType`\n :param credentials: The OAuth2 Credentials to use for the connection\n owned by this client. If not passed (and if no ``http``\n object is passed), falls back to the default inferred\n from the environment.\n\n :type http: :class:`httplib2.Http` or class that defines ``request()``.\n :param http: An optional HTTP object to make requests. 
If not passed, an\n ``http`` object is created that is bound to the\n ``credentials`` for the current object.\n \"\"\"\n\n _connection_class = Connection\n\n def __init__(self, credentials=None, http=None):\n if credentials is None and http is None:\n credentials = get_credentials()\n self._connection = self._connection_class(\n credentials=credentials, http=http)\n\n\nclass _ClientProjectMixin(object):\n \"\"\"Mixin to allow setting the project on the client.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :raises: :class:`EnvironmentError` if the project is neither passed in nor\n set in the environment. :class:`ValueError` if the project value\n is invalid.\n \"\"\"\n\n def __init__(self, project=None):\n project = self._determine_default(project)\n if project is None:\n raise EnvironmentError('Project was not passed and could not be '\n 'determined from the environment.')\n if isinstance(project, six.binary_type):\n project = project.decode('utf-8')\n if not isinstance(project, six.string_types):\n raise ValueError('Project must be a string.')\n self.project = project\n\n @staticmethod\n def _determine_default(project):\n \"\"\"Helper: use default project detection.\"\"\"\n return _determine_default_project(project)\n\n\nclass JSONClient(Client, _ClientProjectMixin):\n \"\"\"Client for Google JSON-based API.\n\n Assumes such APIs use the ``project`` and the client needs to store this\n value.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :type credentials: :class:`google.auth.credentials.Credentials` or\n :class:`NoneType`\n :param credentials: The OAuth2 Credentials to use for the connection\n owned by this client. If not passed (and if no ``http``\n object is passed), falls back to the default inferred\n from the environment.\n\n :type http: :class:`httplib2.Http` or class that defines ``request()``.\n :param http: An optional HTTP object to make requests. If not passed, an\n ``http`` object is created that is bound to the\n ``credentials`` for the current object.\n\n :raises: :class:`ValueError` if the project is neither passed in nor\n set in the environment.\n \"\"\"\n\n def __init__(self, project=None, credentials=None, http=None):\n _ClientProjectMixin.__init__(self, project=project)\n Client.__init__(self, credentials=credentials, http=http)\n", "path": "core/google/cloud/client.py"}]}
| 2,483 | 273 |
gh_patches_debug_42032
|
rasdani/github-patches
|
git_diff
|
translate__pootle-6200
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix fk issues in 2.6 migration/setup
seems like (at least for some users) there is foreign key constraints that are not removed during migration.
we probably need to add something here https://github.com/translate/pootle/blob/stable/2.6.2/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py
specifically, to remove fk constraints to this table on unit commented_by/submitted_by
</issue>
<code>
[start of pootle/apps/pootle_app/management/commands/setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2013, 2014 Zuza Software Foundation
5 #
6 # This file is part of Pootle.
7 #
8 # Pootle is free software; you can redistribute it and/or modify it under the
9 # terms of the GNU General Public License as published by the Free Software
10 # Foundation; either version 2 of the License, or (at your option) any later
11 # version.
12 #
13 # Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
14 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
15 # A PARTICULAR PURPOSE. See the GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License along with
18 # Pootle; if not, see <http://www.gnu.org/licenses/>.
19
20 import logging
21 import os
22
23 # This must be run before importing Django.
24 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
25
26 from django.core.management import call_command
27 from django.core.management.base import CommandError, NoArgsCommand
28
29 from pootle.__version__ import build as NEW_POOTLE_BUILD
30 from pootle_app.models.pootle_config import get_pootle_build
31
32
33 class Command(NoArgsCommand):
34 help = 'Runs the install/upgrade machinery.'
35
36 def handle_noargs(self, **options):
37 """Run the install or upgrade machinery.
38
39 If there is an up-to-date Pootle setup then no action is performed.
40 """
41 current_buildversion = get_pootle_build()
42
43 if not current_buildversion:
44 raise CommandError('Pootle 2.6.1 is not meant to be used in real '
45 'world deployments.'
46 '\n\n'
47 'If you want to install a fresh Pootle then '
48 'install Pootle 2.7.0 or later.'
49 '\n\n'
50 'Otherwise you are upgrading Pootle and you '
51 'already have have upgraded up to the 2.6.1 '
52 'stage, so you must now proceed now with the '
53 'final upgrade to Pootle 2.7.0 or later.')
54 elif current_buildversion < 22000:
55 # Trying to upgrade a deployment older than Pootle 2.5.0 for which
56 # we don't provide a direct upgrade.
57 raise CommandError('This Pootle installation is too old. Please '
58 'upgrade first to 2.5.1.3 before upgrading to '
59 'this version.')
60 elif current_buildversion < NEW_POOTLE_BUILD:
61 logging.info('Upgrading existing Pootle installation.')
62
63 call_command('syncdb', interactive=False)
64
65 if current_buildversion < 25100:
66 # We are upgrading from a pre-South installation (before Pootle
67 # 2.5.1), so it is necessary to fake the first migration for
68 # some apps.
69 OLD_APPS = ("pootle_app", "pootle_language",
70 "pootle_notifications", "pootle_project",
71 "pootle_statistics", "pootle_store",
72 "pootle_translationproject", "staticpages")
73
74 for app in OLD_APPS:
75 call_command("migrate", app, "0001", fake=True, interactive=False)
76
77 call_command('migrate', interactive=False)
78 call_command('upgrade')
79
80 logging.warning('\n\n\n Warning: Pootle 2.6.1 is an interim '
81 'release (a migration step to Pootle'
82 '\n 2.7.0). Do not use Pootle 2.6.1 '
83 'for any deployment.\n\n')
84
[end of pootle/apps/pootle_app/management/commands/setup.py]
[start of pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py]
1 # -*- coding: utf-8 -*-
2 from south.utils import datetime_utils as datetime
3 from south.db import db
4 from south.v2 import SchemaMigration
5 from django.db import connection
6
7
8 class Migration(SchemaMigration):
9 depends_on = (
10 ("pootle_tagging", "0003_del_tag__del_taggeditem"),
11 )
12
13 no_dry_run = True
14
15 def forwards(self, orm):
16 # Deleting remnants of model 'PootleProfile'
17 if u'pootle_app_pootleprofile' in connection.introspection.table_names():
18 db.delete_table(u'pootle_app_pootleprofile')
19
20
21 def backwards(self, orm):
22 raise RuntimeError("Cannot reverse this migration.")
23
24 models = {
25 u'accounts.user': {
26 'Meta': {'object_name': 'User'},
27 '_unit_rows': ('django.db.models.fields.SmallIntegerField', [], {'default': '9', 'db_column': "'unit_rows'"}),
28 'alt_src_langs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_alt_src_langs'", 'blank': 'True', 'db_index': 'True', 'to': u"orm['pootle_language.Language']"}),
29 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
30 'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
31 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
32 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
33 u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
34 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
35 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
36 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
37 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
38 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
39 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
40 },
41 u'auth.group': {
42 'Meta': {'object_name': 'Group'},
43 u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
44 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
45 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
46 },
47 u'auth.permission': {
48 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
49 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
50 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
51 u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
52 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
53 },
54 u'contenttypes.contenttype': {
55 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
56 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
57 u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
58 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
59 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
60 },
61 'pootle_app.directory': {
62 'Meta': {'ordering': "['name']", 'object_name': 'Directory'},
63 u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
64 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
65 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_dirs'", 'null': 'True', 'to': "orm['pootle_app.Directory']"}),
66 'pootle_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
67 },
68 u'pootle_language.language': {
69 'Meta': {'ordering': "['code']", 'object_name': 'Language', 'db_table': "'pootle_app_language'"},
70 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
71 'description': ('pootle.core.markup.fields.MarkupField', [], {'blank': 'True'}),
72 'directory': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pootle_app.Directory']", 'unique': 'True'}),
73 'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
74 u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
75 'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
76 'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
77 'specialchars': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
78 }
79 }
80
81 complete_apps = ['accounts']
82
[end of pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py b/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py
--- a/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py
+++ b/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py
@@ -14,6 +14,9 @@
def forwards(self, orm):
# Deleting remnants of model 'PootleProfile'
+ db.delete_foreign_key('pootle_store_unit', 'commented_by_id')
+ db.delete_foreign_key('pootle_store_unit', 'submitted_by_id')
+ db.delete_foreign_key('pootle_app_submission', 'submitter_id')
if u'pootle_app_pootleprofile' in connection.introspection.table_names():
db.delete_table(u'pootle_app_pootleprofile')
diff --git a/pootle/apps/pootle_app/management/commands/setup.py b/pootle/apps/pootle_app/management/commands/setup.py
--- a/pootle/apps/pootle_app/management/commands/setup.py
+++ b/pootle/apps/pootle_app/management/commands/setup.py
@@ -26,7 +26,8 @@
from django.core.management import call_command
from django.core.management.base import CommandError, NoArgsCommand
-from pootle.__version__ import build as NEW_POOTLE_BUILD
+from pootle.__version__ import (build as NEW_POOTLE_BUILD,
+ sver as pootle_version)
from pootle_app.models.pootle_config import get_pootle_build
@@ -41,16 +42,17 @@
current_buildversion = get_pootle_build()
if not current_buildversion:
- raise CommandError('Pootle 2.6.1 is not meant to be used in real '
+ raise CommandError('Pootle %s is not meant to be used in real '
'world deployments.'
'\n\n'
'If you want to install a fresh Pootle then '
'install Pootle 2.7.0 or later.'
'\n\n'
'Otherwise you are upgrading Pootle and you '
- 'already have have upgraded up to the 2.6.1 '
+ 'already have have upgraded up to the %s '
'stage, so you must now proceed now with the '
- 'final upgrade to Pootle 2.7.0 or later.')
+ 'final upgrade to Pootle 2.7.0 or later.' %
+ (pootle_version, pootle_version))
elif current_buildversion < 22000:
# Trying to upgrade a deployment older than Pootle 2.5.0 for which
# we don't provide a direct upgrade.
@@ -72,12 +74,14 @@
"pootle_translationproject", "staticpages")
for app in OLD_APPS:
- call_command("migrate", app, "0001", fake=True, interactive=False)
+ call_command("migrate", app, "0001", fake=True,
+ interactive=False)
call_command('migrate', interactive=False)
call_command('upgrade')
- logging.warning('\n\n\n Warning: Pootle 2.6.1 is an interim '
+ logging.warning('\n\n\n Warning: Pootle %s is an interim '
'release (a migration step to Pootle'
- '\n 2.7.0). Do not use Pootle 2.6.1 '
- 'for any deployment.\n\n')
+ '\n 2.7.0). Do not use Pootle %s '
+ 'for any deployment.\n\n' %
+ (pootle_version, pootle_version))
|
{"golden_diff": "diff --git a/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py b/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py\n--- a/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py\n+++ b/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py\n@@ -14,6 +14,9 @@\n \n def forwards(self, orm):\n # Deleting remnants of model 'PootleProfile'\n+ db.delete_foreign_key('pootle_store_unit', 'commented_by_id')\n+ db.delete_foreign_key('pootle_store_unit', 'submitted_by_id')\n+ db.delete_foreign_key('pootle_app_submission', 'submitter_id')\n if u'pootle_app_pootleprofile' in connection.introspection.table_names():\n db.delete_table(u'pootle_app_pootleprofile')\n \ndiff --git a/pootle/apps/pootle_app/management/commands/setup.py b/pootle/apps/pootle_app/management/commands/setup.py\n--- a/pootle/apps/pootle_app/management/commands/setup.py\n+++ b/pootle/apps/pootle_app/management/commands/setup.py\n@@ -26,7 +26,8 @@\n from django.core.management import call_command\n from django.core.management.base import CommandError, NoArgsCommand\n \n-from pootle.__version__ import build as NEW_POOTLE_BUILD\n+from pootle.__version__ import (build as NEW_POOTLE_BUILD,\n+ sver as pootle_version)\n from pootle_app.models.pootle_config import get_pootle_build\n \n \n@@ -41,16 +42,17 @@\n current_buildversion = get_pootle_build()\n \n if not current_buildversion:\n- raise CommandError('Pootle 2.6.1 is not meant to be used in real '\n+ raise CommandError('Pootle %s is not meant to be used in real '\n 'world deployments.'\n '\\n\\n'\n 'If you want to install a fresh Pootle then '\n 'install Pootle 2.7.0 or later.'\n '\\n\\n'\n 'Otherwise you are upgrading Pootle and you '\n- 'already have have upgraded up to the 2.6.1 '\n+ 'already have have upgraded up to the %s '\n 'stage, so you must now proceed now with the '\n- 'final upgrade to Pootle 2.7.0 or later.')\n+ 'final upgrade to Pootle 2.7.0 or later.' %\n+ (pootle_version, pootle_version))\n elif current_buildversion < 22000:\n # Trying to upgrade a deployment older than Pootle 2.5.0 for which\n # we don't provide a direct upgrade.\n@@ -72,12 +74,14 @@\n \"pootle_translationproject\", \"staticpages\")\n \n for app in OLD_APPS:\n- call_command(\"migrate\", app, \"0001\", fake=True, interactive=False)\n+ call_command(\"migrate\", app, \"0001\", fake=True,\n+ interactive=False)\n \n call_command('migrate', interactive=False)\n call_command('upgrade')\n \n- logging.warning('\\n\\n\\n Warning: Pootle 2.6.1 is an interim '\n+ logging.warning('\\n\\n\\n Warning: Pootle %s is an interim '\n 'release (a migration step to Pootle'\n- '\\n 2.7.0). Do not use Pootle 2.6.1 '\n- 'for any deployment.\\n\\n')\n+ '\\n 2.7.0). 
Do not use Pootle %s '\n+ 'for any deployment.\\n\\n' %\n+ (pootle_version, pootle_version))\n", "issue": "Fix fk issues in 2.6 migration/setup\nseems like (at least for some users) there is foreign key constraints that are not removed during migration.\r\n\r\nwe probably need to add something here https://github.com/translate/pootle/blob/stable/2.6.2/pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py\r\n\r\nspecifically, to remove fk constraints to this table on unit commented_by/submitted_by\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2013, 2014 Zuza Software Foundation\n#\n# This file is part of Pootle.\n#\n# Pootle is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# Pootle; if not, see <http://www.gnu.org/licenses/>.\n\nimport logging\nimport os\n\n# This must be run before importing Django.\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom django.core.management import call_command\nfrom django.core.management.base import CommandError, NoArgsCommand\n\nfrom pootle.__version__ import build as NEW_POOTLE_BUILD\nfrom pootle_app.models.pootle_config import get_pootle_build\n\n\nclass Command(NoArgsCommand):\n help = 'Runs the install/upgrade machinery.'\n\n def handle_noargs(self, **options):\n \"\"\"Run the install or upgrade machinery.\n\n If there is an up-to-date Pootle setup then no action is performed.\n \"\"\"\n current_buildversion = get_pootle_build()\n\n if not current_buildversion:\n raise CommandError('Pootle 2.6.1 is not meant to be used in real '\n 'world deployments.'\n '\\n\\n'\n 'If you want to install a fresh Pootle then '\n 'install Pootle 2.7.0 or later.'\n '\\n\\n'\n 'Otherwise you are upgrading Pootle and you '\n 'already have have upgraded up to the 2.6.1 '\n 'stage, so you must now proceed now with the '\n 'final upgrade to Pootle 2.7.0 or later.')\n elif current_buildversion < 22000:\n # Trying to upgrade a deployment older than Pootle 2.5.0 for which\n # we don't provide a direct upgrade.\n raise CommandError('This Pootle installation is too old. Please '\n 'upgrade first to 2.5.1.3 before upgrading to '\n 'this version.')\n elif current_buildversion < NEW_POOTLE_BUILD:\n logging.info('Upgrading existing Pootle installation.')\n\n call_command('syncdb', interactive=False)\n\n if current_buildversion < 25100:\n # We are upgrading from a pre-South installation (before Pootle\n # 2.5.1), so it is necessary to fake the first migration for\n # some apps.\n OLD_APPS = (\"pootle_app\", \"pootle_language\",\n \"pootle_notifications\", \"pootle_project\",\n \"pootle_statistics\", \"pootle_store\",\n \"pootle_translationproject\", \"staticpages\")\n\n for app in OLD_APPS:\n call_command(\"migrate\", app, \"0001\", fake=True, interactive=False)\n\n call_command('migrate', interactive=False)\n call_command('upgrade')\n\n logging.warning('\\n\\n\\n Warning: Pootle 2.6.1 is an interim '\n 'release (a migration step to Pootle'\n '\\n 2.7.0). 
Do not use Pootle 2.6.1 '\n 'for any deployment.\\n\\n')\n", "path": "pootle/apps/pootle_app/management/commands/setup.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import connection\n\n\nclass Migration(SchemaMigration):\n depends_on = (\n (\"pootle_tagging\", \"0003_del_tag__del_taggeditem\"),\n )\n\n no_dry_run = True\n\n def forwards(self, orm):\n # Deleting remnants of model 'PootleProfile'\n if u'pootle_app_pootleprofile' in connection.introspection.table_names():\n db.delete_table(u'pootle_app_pootleprofile')\n\n\n def backwards(self, orm):\n raise RuntimeError(\"Cannot reverse this migration.\")\n\n models = {\n u'accounts.user': {\n 'Meta': {'object_name': 'User'},\n '_unit_rows': ('django.db.models.fields.SmallIntegerField', [], {'default': '9', 'db_column': \"'unit_rows'\"}),\n 'alt_src_langs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'user_alt_src_langs'\", 'blank': 'True', 'db_index': 'True', 'to': u\"orm['pootle_language.Language']\"}),\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),\n 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"u'user_set'\", 'blank': 'True', 'to': u\"orm['auth.Group']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),\n 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"u'user_set'\", 'blank': 'True', 'to': u\"orm['auth.Permission']\"}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n u'auth.group': {\n 'Meta': {'object_name': 'Group'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n u'auth.permission': {\n 'Meta': {'ordering': \"(u'content_type__app_label', u'content_type__model', u'codename')\", 'unique_together': \"((u'content_type', u'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n u'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n 'pootle_app.directory': {\n 'Meta': {'ordering': \"['name']\", 'object_name': 'Directory'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'child_dirs'\", 'null': 'True', 'to': \"orm['pootle_app.Directory']\"}),\n 'pootle_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})\n },\n u'pootle_language.language': {\n 'Meta': {'ordering': \"['code']\", 'object_name': 'Language', 'db_table': \"'pootle_app_language'\"},\n 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),\n 'description': ('pootle.core.markup.fields.MarkupField', [], {'blank': 'True'}),\n 'directory': ('django.db.models.fields.related.OneToOneField', [], {'to': \"orm['pootle_app.Directory']\", 'unique': 'True'}),\n 'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),\n 'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),\n 'specialchars': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})\n }\n }\n\n complete_apps = ['accounts']\n", "path": "pootle/apps/accounts/migrations/0005_drop_pootle_app_pootleprofile.py"}]}
| 3,186 | 891 |
gh_patches_debug_24075
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-2787
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[APP SUBMITTED]: AttributeError: 'BraceAdapter' object has no attribute 'DEBUG'
### INFO
**Python Version**: `2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:53:40) [MSC v.1500 64 bit (AMD64)]`
**Operating System**: `Windows-10-10.0.15063`
**Locale**: `cp1252`
**Branch**: [develop](../tree/develop)
**Database**: `44.8`
**Commit**: pymedusa/Medusa@a2aaaf4dd43c018019a67fe1a27074b124681fa4
**Link to Log**: https://gist.github.com/66f5f5fc2f3cc2fbaec732d8d994335e
### ERROR
<pre>
2017-05-12 08:29:52 ERROR SEARCHQUEUE-FORCED-309269 :: [Rarbg] :: [a2aaaf4] Unknown exception while searching Rarbg. Error: "'BraceAdapter' object has no attribute 'DEBUG'"
Traceback (most recent call last):
File "C:\Medusa\medusa\search\core.py", line 547, in search_providers
down_cur_quality, manual_search, manual_search_type)
File "C:\Medusa\medusa\providers\generic_provider.py", line 207, in find_search_results
items_list += self.search(search_string, ep_obj=episode)
File "C:\Medusa\medusa\providers\torrent\json\rarbg.py", line 136, in search
log_level = log.DEBUG
AttributeError: 'BraceAdapter' object has no attribute 'DEBUG'
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
</issue>
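The traceback comes down to looking up a logging level constant on the adapter instance. A minimal sketch of the distinction, using only the standard library (the `BraceAdapter` wrapper and the error values are stand-ins here, not the provider's real objects):

```python
import logging

log = logging.getLogger(__name__)  # stand-in for the BraceAdapter-wrapped logger

error = "Too many requests per second"  # hypothetical API error message
error_code = 5                          # hypothetical API error code

if error_code == 5:
    log_level = logging.INFO      # level constants live on the logging module...
elif error_code not in (4, 8, 10, 12, 14, 20):
    log_level = logging.WARNING
else:
    log_level = logging.DEBUG     # ...so `log.DEBUG` on the adapter raises AttributeError

log.log(log_level, "%s Code: %s", error, error_code)
```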
<code>
[start of medusa/providers/torrent/json/rarbg.py]
1 # coding=utf-8
2
3 """Provider code for RARBG."""
4
5 from __future__ import unicode_literals
6
7 import datetime
8 import logging
9 import time
10 import traceback
11
12 from medusa import (
13 app,
14 tv,
15 )
16 from medusa.helper.common import convert_size, try_int
17 from medusa.logger.adapters.style import BraceAdapter
18 from medusa.providers.torrent.torrent_provider import TorrentProvider
19
20 log = BraceAdapter(logging.getLogger(__name__))
21 log.logger.addHandler(logging.NullHandler())
22
23
24 class RarbgProvider(TorrentProvider):
25 """RARBG Torrent provider."""
26
27 def __init__(self):
28 """Initialize the class."""
29 super(self.__class__, self).__init__('Rarbg')
30
31 # Credentials
32 self.public = True
33 self.token = None
34 self.token_expires = None
35
36 # URLs
37 self.url = 'https://rarbg.com' # Spec: https://torrentapi.org/apidocs_v2.txt
38 self.urls = {
39 'api': 'http://torrentapi.org/pubapi_v2.php',
40 }
41
42 # Proper Strings
43 self.proper_strings = ['{{PROPER|REPACK|REAL|RERIP}}']
44
45 # Miscellaneous Options
46 self.ranked = None
47 self.sorting = None
48
49 # Torrent Stats
50 self.minseed = None
51 self.minleech = None
52
53 # Cache
54 self.cache = tv.Cache(self, min_time=10) # only poll RARBG every 10 minutes max
55
56 def search(self, search_strings, age=0, ep_obj=None):
57 """
58 Search a provider and parse the results.
59
60 :param search_strings: A dict with mode (key) and the search value (value)
61 :param age: Not used
62 :param ep_obj: Not used
63 :returns: A list of search results (structure)
64 """
65 results = []
66 if not self.login():
67 return results
68
69 # Search Params
70 search_params = {
71 'app_id': app.RARBG_APPID,
72 'category': 'tv',
73 'min_seeders': try_int(self.minseed),
74 'min_leechers': try_int(self.minleech),
75 'limit': 100,
76 'format': 'json_extended',
77 'ranked': try_int(self.ranked),
78 'token': self.token,
79 'sort': 'last',
80 'mode': 'list',
81 }
82
83 for mode in search_strings:
84 log.debug('Search mode: {0}', mode)
85
86 if mode == 'RSS':
87 search_params['search_string'] = None
88 search_params['search_tvdb'] = None
89 else:
90 search_params['sort'] = self.sorting if self.sorting else 'seeders'
91 search_params['mode'] = 'search'
92 search_params['search_tvdb'] = self._get_tvdb_id()
93
94 for search_string in search_strings[mode]:
95 if mode != 'RSS':
96 log.debug('Search string: {search}',
97 {'search': search_string})
98 if self.ranked:
99 log.debug('Searching only ranked torrents')
100
101 search_params['search_string'] = search_string
102
103 # Check if token is still valid before search
104 if not self.login():
105 continue
106
107 # Maximum requests allowed are 1req/2sec
108 # Changing to 5 because of server clock desync
109 time.sleep(5)
110
111 search_url = self.urls['api']
112 response = self.get_url(search_url, params=search_params, returns='response')
113 if not response or not response.content:
114 log.debug('No data returned from provider')
115 continue
116
117 try:
118 jdata = response.json()
119 except ValueError:
120 log.debug('No data returned from provider')
121 continue
122
123 error = jdata.get('error')
124 error_code = jdata.get('error_code')
125 if error:
126 # List of errors: https://github.com/rarbg/torrentapi/issues/1#issuecomment-114763312
127 if error_code == 5:
128 # 5 = Too many requests per second
129 log_level = log.INFO
130 elif error_code not in (4, 8, 10, 12, 14, 20):
131 # 4 = Invalid token. Use get_token for a new one!
132 # 8, 10, 12, 14 = Cant find * in database. Are you sure this * exists?
133 # 20 = No results found
134 log_level = log.WARNING
135 else:
136 log_level = log.DEBUG
137 log.log(log_level, '{msg} Code: {code}', {'msg': error, 'code': error_code})
138 continue
139
140 results += self.parse(jdata, mode)
141
142 return results
143
144 def parse(self, data, mode):
145 """
146 Parse search results for items.
147
148 :param data: The raw response from a search
149 :param mode: The current mode used to search, e.g. RSS
150
151 :return: A list of items found
152 """
153 items = []
154
155 torrent_rows = data.get('torrent_results', {})
156
157 if not torrent_rows:
158 log.debug('Data returned from provider does not contain any torrents')
159 return items
160
161 for row in torrent_rows:
162 try:
163 title = row.pop('title')
164 download_url = row.pop('download') + self._custom_trackers
165 if not all([title, download_url]):
166 continue
167
168 seeders = row.pop('seeders')
169 leechers = row.pop('leechers')
170
171 # Filter unseeded torrent
172 if seeders < min(self.minseed, 1):
173 if mode != 'RSS':
174 log.debug("Discarding torrent because it doesn't meet the"
175 " minimum seeders: {0}. Seeders: {1}",
176 title, seeders)
177 continue
178
179 torrent_size = row.pop('size', -1)
180 size = convert_size(torrent_size) or -1
181
182 pubdate_raw = row.pop('pubdate')
183 pubdate = self._parse_pubdate(pubdate_raw)
184
185 item = {
186 'title': title,
187 'link': download_url,
188 'size': size,
189 'seeders': seeders,
190 'leechers': leechers,
191 'pubdate': pubdate,
192 }
193 if mode != 'RSS':
194 log.debug('Found result: {0} with {1} seeders and {2} leechers',
195 title, seeders, leechers)
196
197 items.append(item)
198 except (AttributeError, TypeError, KeyError, ValueError, IndexError):
199 log.error('Failed parsing provider. Traceback: {0!r}',
200 traceback.format_exc())
201
202 return items
203
204 def login(self):
205 """Login method used for logging in before doing search and torrent downloads."""
206 if self.token and self.token_expires and datetime.datetime.now() < self.token_expires:
207 return True
208
209 login_params = {
210 'get_token': 'get_token',
211 'format': 'json',
212 'app_id': app.RARBG_APPID,
213 }
214
215 response = self.get_url(self.urls['api'], params=login_params, returns='json')
216 if not response:
217 log.warning('Unable to connect to provider')
218 return False
219
220 self.token = response.get('token')
221 self.token_expires = datetime.datetime.now() + datetime.timedelta(minutes=14) if self.token else None
222 return self.token is not None
223
224
225 provider = RarbgProvider()
226
[end of medusa/providers/torrent/json/rarbg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/providers/torrent/json/rarbg.py b/medusa/providers/torrent/json/rarbg.py
--- a/medusa/providers/torrent/json/rarbg.py
+++ b/medusa/providers/torrent/json/rarbg.py
@@ -126,14 +126,14 @@
# List of errors: https://github.com/rarbg/torrentapi/issues/1#issuecomment-114763312
if error_code == 5:
# 5 = Too many requests per second
- log_level = log.INFO
+ log_level = logging.INFO
elif error_code not in (4, 8, 10, 12, 14, 20):
# 4 = Invalid token. Use get_token for a new one!
# 8, 10, 12, 14 = Cant find * in database. Are you sure this * exists?
# 20 = No results found
- log_level = log.WARNING
+ log_level = logging.WARNING
else:
- log_level = log.DEBUG
+ log_level = logging.DEBUG
log.log(log_level, '{msg} Code: {code}', {'msg': error, 'code': error_code})
continue
|
{"golden_diff": "diff --git a/medusa/providers/torrent/json/rarbg.py b/medusa/providers/torrent/json/rarbg.py\n--- a/medusa/providers/torrent/json/rarbg.py\n+++ b/medusa/providers/torrent/json/rarbg.py\n@@ -126,14 +126,14 @@\n # List of errors: https://github.com/rarbg/torrentapi/issues/1#issuecomment-114763312\n if error_code == 5:\n # 5 = Too many requests per second\n- log_level = log.INFO\n+ log_level = logging.INFO\n elif error_code not in (4, 8, 10, 12, 14, 20):\n # 4 = Invalid token. Use get_token for a new one!\n # 8, 10, 12, 14 = Cant find * in database. Are you sure this * exists?\n # 20 = No results found\n- log_level = log.WARNING\n+ log_level = logging.WARNING\n else:\n- log_level = log.DEBUG\n+ log_level = logging.DEBUG\n log.log(log_level, '{msg} Code: {code}', {'msg': error, 'code': error_code})\n continue\n", "issue": "[APP SUBMITTED]: AttributeError: 'BraceAdapter' object has no attribute 'DEBUG'\n### INFO\n**Python Version**: `2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:53:40) [MSC v.1500 64 bit (AMD64)]`\n**Operating System**: `Windows-10-10.0.15063`\n**Locale**: `cp1252`\n**Branch**: [develop](../tree/develop)\n**Database**: `44.8`\n**Commit**: pymedusa/Medusa@a2aaaf4dd43c018019a67fe1a27074b124681fa4\n**Link to Log**: https://gist.github.com/66f5f5fc2f3cc2fbaec732d8d994335e\n### ERROR\n<pre>\n2017-05-12 08:29:52 ERROR SEARCHQUEUE-FORCED-309269 :: [Rarbg] :: [a2aaaf4] Unknown exception while searching Rarbg. Error: \"'BraceAdapter' object has no attribute 'DEBUG'\"\nTraceback (most recent call last):\n File \"C:\\Medusa\\medusa\\search\\core.py\", line 547, in search_providers\n down_cur_quality, manual_search, manual_search_type)\n File \"C:\\Medusa\\medusa\\providers\\generic_provider.py\", line 207, in find_search_results\n items_list += self.search(search_string, ep_obj=episode)\n File \"C:\\Medusa\\medusa\\providers\\torrent\\json\\rarbg.py\", line 136, in search\n log_level = log.DEBUG\nAttributeError: 'BraceAdapter' object has no attribute 'DEBUG'\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for RARBG.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport datetime\nimport logging\nimport time\nimport traceback\n\nfrom medusa import (\n app,\n tv,\n)\nfrom medusa.helper.common import convert_size, try_int\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.torrent.torrent_provider import TorrentProvider\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass RarbgProvider(TorrentProvider):\n \"\"\"RARBG Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(self.__class__, self).__init__('Rarbg')\n\n # Credentials\n self.public = True\n self.token = None\n self.token_expires = None\n\n # URLs\n self.url = 'https://rarbg.com' # Spec: https://torrentapi.org/apidocs_v2.txt\n self.urls = {\n 'api': 'http://torrentapi.org/pubapi_v2.php',\n }\n\n # Proper Strings\n self.proper_strings = ['{{PROPER|REPACK|REAL|RERIP}}']\n\n # Miscellaneous Options\n self.ranked = None\n self.sorting = None\n\n # Torrent Stats\n self.minseed = None\n self.minleech = None\n\n # Cache\n self.cache = tv.Cache(self, min_time=10) # only poll RARBG every 10 minutes max\n\n def search(self, search_strings, age=0, ep_obj=None):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n 
:param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n # Search Params\n search_params = {\n 'app_id': app.RARBG_APPID,\n 'category': 'tv',\n 'min_seeders': try_int(self.minseed),\n 'min_leechers': try_int(self.minleech),\n 'limit': 100,\n 'format': 'json_extended',\n 'ranked': try_int(self.ranked),\n 'token': self.token,\n 'sort': 'last',\n 'mode': 'list',\n }\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n\n if mode == 'RSS':\n search_params['search_string'] = None\n search_params['search_tvdb'] = None\n else:\n search_params['sort'] = self.sorting if self.sorting else 'seeders'\n search_params['mode'] = 'search'\n search_params['search_tvdb'] = self._get_tvdb_id()\n\n for search_string in search_strings[mode]:\n if mode != 'RSS':\n log.debug('Search string: {search}',\n {'search': search_string})\n if self.ranked:\n log.debug('Searching only ranked torrents')\n\n search_params['search_string'] = search_string\n\n # Check if token is still valid before search\n if not self.login():\n continue\n\n # Maximum requests allowed are 1req/2sec\n # Changing to 5 because of server clock desync\n time.sleep(5)\n\n search_url = self.urls['api']\n response = self.get_url(search_url, params=search_params, returns='response')\n if not response or not response.content:\n log.debug('No data returned from provider')\n continue\n\n try:\n jdata = response.json()\n except ValueError:\n log.debug('No data returned from provider')\n continue\n\n error = jdata.get('error')\n error_code = jdata.get('error_code')\n if error:\n # List of errors: https://github.com/rarbg/torrentapi/issues/1#issuecomment-114763312\n if error_code == 5:\n # 5 = Too many requests per second\n log_level = log.INFO\n elif error_code not in (4, 8, 10, 12, 14, 20):\n # 4 = Invalid token. Use get_token for a new one!\n # 8, 10, 12, 14 = Cant find * in database. Are you sure this * exists?\n # 20 = No results found\n log_level = log.WARNING\n else:\n log_level = log.DEBUG\n log.log(log_level, '{msg} Code: {code}', {'msg': error, 'code': error_code})\n continue\n\n results += self.parse(jdata, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n torrent_rows = data.get('torrent_results', {})\n\n if not torrent_rows:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n for row in torrent_rows:\n try:\n title = row.pop('title')\n download_url = row.pop('download') + self._custom_trackers\n if not all([title, download_url]):\n continue\n\n seeders = row.pop('seeders')\n leechers = row.pop('leechers')\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n log.debug(\"Discarding torrent because it doesn't meet the\"\n \" minimum seeders: {0}. 
Seeders: {1}\",\n title, seeders)\n continue\n\n torrent_size = row.pop('size', -1)\n size = convert_size(torrent_size) or -1\n\n pubdate_raw = row.pop('pubdate')\n pubdate = self._parse_pubdate(pubdate_raw)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0} with {1} seeders and {2} leechers',\n title, seeders, leechers)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n log.error('Failed parsing provider. Traceback: {0!r}',\n traceback.format_exc())\n\n return items\n\n def login(self):\n \"\"\"Login method used for logging in before doing search and torrent downloads.\"\"\"\n if self.token and self.token_expires and datetime.datetime.now() < self.token_expires:\n return True\n\n login_params = {\n 'get_token': 'get_token',\n 'format': 'json',\n 'app_id': app.RARBG_APPID,\n }\n\n response = self.get_url(self.urls['api'], params=login_params, returns='json')\n if not response:\n log.warning('Unable to connect to provider')\n return False\n\n self.token = response.get('token')\n self.token_expires = datetime.datetime.now() + datetime.timedelta(minutes=14) if self.token else None\n return self.token is not None\n\n\nprovider = RarbgProvider()\n", "path": "medusa/providers/torrent/json/rarbg.py"}]}
| 3,230 | 287 |
gh_patches_debug_1204
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-1497
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Question]: How to get the right BrowserType from a device name?
### Your question
I noticed that the CLI is able to figure out the right `BrowserType` to use when it is launched from the command line:
```
playwright open --device="Desktop Safari" wikipedia.org # Webkit
playwright open --device="Desktop Firefox" wikipedia.org # Firefox
playwright open --device="Desktop Chrome" wikipedia.org # Chrome
```
But [the documentation](https://playwright.dev/python/docs/api/class-playwright#playwright-devices) seems to say that I have to initialize a `BrowserType` before I can pass the settings to the context, which partially defeats the purpose of the device settings.
I can implement my own logic to initialize the right `BrowserType` for each device, but as `playwright open` can already do that, that seems superfluous.
</issue>
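One way this could look from user code, assuming each device descriptor carried a `default_browser_type` entry (that key is the proposal here, not something every installed version is guaranteed to expose):

```python
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    device = dict(p.devices["Desktop Safari"])
    # Pop the assumed key so the remaining settings can be passed to new_context().
    browser_name = device.pop("default_browser_type", "chromium")
    browser_type = getattr(p, browser_name)  # p.webkit / p.firefox / p.chromium
    browser = browser_type.launch()
    context = browser.new_context(**device)
    page = context.new_page()
    page.goto("https://wikipedia.org")
    browser.close()
```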
<code>
[start of playwright/_impl/_playwright.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Dict
16
17 from playwright._impl._browser_type import BrowserType
18 from playwright._impl._connection import ChannelOwner, from_channel
19 from playwright._impl._fetch import APIRequest
20 from playwright._impl._local_utils import LocalUtils
21 from playwright._impl._selectors import Selectors, SelectorsOwner
22
23
24 class Playwright(ChannelOwner):
25 devices: Dict
26 selectors: Selectors
27 chromium: BrowserType
28 firefox: BrowserType
29 webkit: BrowserType
30 request: APIRequest
31
32 def __init__(
33 self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
34 ) -> None:
35 super().__init__(parent, type, guid, initializer)
36 self.request = APIRequest(self)
37 self.chromium = from_channel(initializer["chromium"])
38 self.chromium._playwright = self
39 self.firefox = from_channel(initializer["firefox"])
40 self.firefox._playwright = self
41 self.webkit = from_channel(initializer["webkit"])
42 self.webkit._playwright = self
43
44 self.selectors = Selectors(self._loop, self._dispatcher_fiber)
45 selectors_owner: SelectorsOwner = from_channel(initializer["selectors"])
46 self.selectors._add_channel(selectors_owner)
47
48 self._connection.on(
49 "close", lambda: self.selectors._remove_channel(selectors_owner)
50 )
51 self.devices = {}
52 self.devices = {
53 device["name"]: parse_device_descriptor(device["descriptor"])
54 for device in initializer["deviceDescriptors"]
55 }
56 self._utils: LocalUtils = from_channel(initializer["utils"])
57
58 def __getitem__(self, value: str) -> "BrowserType":
59 if value == "chromium":
60 return self.chromium
61 elif value == "firefox":
62 return self.firefox
63 elif value == "webkit":
64 return self.webkit
65 raise ValueError("Invalid browser " + value)
66
67 def _set_selectors(self, selectors: SelectorsOwner) -> None:
68 selectors_owner = from_channel(self._initializer["selectors"])
69 self.selectors._remove_channel(selectors_owner)
70 self.selectors = selectors
71 self.selectors._add_channel(selectors_owner)
72
73 def stop(self) -> None:
74 pass
75
76
77 def parse_device_descriptor(dict: Dict) -> Dict:
78 return {
79 "user_agent": dict["userAgent"],
80 "viewport": dict["viewport"],
81 "device_scale_factor": dict["deviceScaleFactor"],
82 "is_mobile": dict["isMobile"],
83 "has_touch": dict["hasTouch"],
84 }
85
[end of playwright/_impl/_playwright.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/playwright/_impl/_playwright.py b/playwright/_impl/_playwright.py
--- a/playwright/_impl/_playwright.py
+++ b/playwright/_impl/_playwright.py
@@ -81,4 +81,5 @@
"device_scale_factor": dict["deviceScaleFactor"],
"is_mobile": dict["isMobile"],
"has_touch": dict["hasTouch"],
+ "default_browser_type": dict["defaultBrowserType"],
}
|
{"golden_diff": "diff --git a/playwright/_impl/_playwright.py b/playwright/_impl/_playwright.py\n--- a/playwright/_impl/_playwright.py\n+++ b/playwright/_impl/_playwright.py\n@@ -81,4 +81,5 @@\n \"device_scale_factor\": dict[\"deviceScaleFactor\"],\n \"is_mobile\": dict[\"isMobile\"],\n \"has_touch\": dict[\"hasTouch\"],\n+ \"default_browser_type\": dict[\"defaultBrowserType\"],\n }\n", "issue": "[Question]: How to get the right BrowserType from a device name? \n### Your question\n\nI noticed that the CLI is able to figure out the right `BrowserType` to use when it is launched from the command line:\r\n\r\n```\r\nplaywright open --device=\"Desktop Safari\" wikipedia.org # Webkit\r\nplaywright open --device=\"Desktop Firefox\" wikipedia.org # Firefox\r\nplaywright open --device=\"Desktop Chrome\" wikipedia.org # Chrome\r\n``` \r\n\r\nBut [the documentation](https://playwright.dev/python/docs/api/class-playwright#playwright-devices) seems to say that I have to initialize a `BrowserType` before I can pass the settings to the context, which partially defeats the purpose of the device settings.\r\n\r\nI can implement my own logic do initialize the right `BrowserType` for each device, but as `playwright open` can already do that, that seems superfluous.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nfrom playwright._impl._browser_type import BrowserType\nfrom playwright._impl._connection import ChannelOwner, from_channel\nfrom playwright._impl._fetch import APIRequest\nfrom playwright._impl._local_utils import LocalUtils\nfrom playwright._impl._selectors import Selectors, SelectorsOwner\n\n\nclass Playwright(ChannelOwner):\n devices: Dict\n selectors: Selectors\n chromium: BrowserType\n firefox: BrowserType\n webkit: BrowserType\n request: APIRequest\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self.request = APIRequest(self)\n self.chromium = from_channel(initializer[\"chromium\"])\n self.chromium._playwright = self\n self.firefox = from_channel(initializer[\"firefox\"])\n self.firefox._playwright = self\n self.webkit = from_channel(initializer[\"webkit\"])\n self.webkit._playwright = self\n\n self.selectors = Selectors(self._loop, self._dispatcher_fiber)\n selectors_owner: SelectorsOwner = from_channel(initializer[\"selectors\"])\n self.selectors._add_channel(selectors_owner)\n\n self._connection.on(\n \"close\", lambda: self.selectors._remove_channel(selectors_owner)\n )\n self.devices = {}\n self.devices = {\n device[\"name\"]: parse_device_descriptor(device[\"descriptor\"])\n for device in initializer[\"deviceDescriptors\"]\n }\n self._utils: LocalUtils = from_channel(initializer[\"utils\"])\n\n def __getitem__(self, value: str) -> \"BrowserType\":\n if value == \"chromium\":\n return self.chromium\n elif value == \"firefox\":\n return self.firefox\n elif value == \"webkit\":\n 
return self.webkit\n raise ValueError(\"Invalid browser \" + value)\n\n def _set_selectors(self, selectors: SelectorsOwner) -> None:\n selectors_owner = from_channel(self._initializer[\"selectors\"])\n self.selectors._remove_channel(selectors_owner)\n self.selectors = selectors\n self.selectors._add_channel(selectors_owner)\n\n def stop(self) -> None:\n pass\n\n\ndef parse_device_descriptor(dict: Dict) -> Dict:\n return {\n \"user_agent\": dict[\"userAgent\"],\n \"viewport\": dict[\"viewport\"],\n \"device_scale_factor\": dict[\"deviceScaleFactor\"],\n \"is_mobile\": dict[\"isMobile\"],\n \"has_touch\": dict[\"hasTouch\"],\n }\n", "path": "playwright/_impl/_playwright.py"}]}
| 1,564 | 103 |
gh_patches_debug_11614
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-295
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enforce new SECRET_KEY in production environments
The server should refuse to start if the SECRET_KEY does not differ from the default value in our repository.
The importance of this issue depends on whether the Federated Cloud feature is using this key or not.
</issue>
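A sketch of one possible shape for that guard; the default key string, the log message and the exit behaviour below are placeholders rather than the project's actual values:

```python
import logging
import sys

from django.apps import AppConfig
from django.conf import settings

logger = logging.getLogger(__name__)

REPOSITORY_DEFAULT_SECRET_KEY = "<the key committed to the repository>"  # placeholder


class CmsConfig(AppConfig):
    name = "cms"

    def ready(self):
        # Refuse to start a production (non-DEBUG) instance that still uses the shipped key.
        if settings.SECRET_KEY == REPOSITORY_DEFAULT_SECRET_KEY and not settings.DEBUG:
            logger.error("SECRET_KEY still has its default value, change it in settings.py!")
            sys.exit(1)
```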
<code>
[start of backend/cms/apps.py]
1 """
2 Django related class representing a config of an app
3 """
4 from django.apps import AppConfig
5
6
7 class CmsConfig(AppConfig):
8 """
9 Class inheriting the django AppConfig
10 """
11
12 name = 'cms'
13
[end of backend/cms/apps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/backend/cms/apps.py b/backend/cms/apps.py
--- a/backend/cms/apps.py
+++ b/backend/cms/apps.py
@@ -1,8 +1,12 @@
"""
Django related class representing a config of an app
"""
+import logging
+import sys
+from django.conf import settings
from django.apps import AppConfig
+logger = logging.getLogger(__name__)
class CmsConfig(AppConfig):
"""
@@ -10,3 +14,8 @@
"""
name = 'cms'
+
+ def ready(self):
+ if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:
+ logger.error("You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!")
+ sys.exit(1)
|
{"golden_diff": "diff --git a/backend/cms/apps.py b/backend/cms/apps.py\n--- a/backend/cms/apps.py\n+++ b/backend/cms/apps.py\n@@ -1,8 +1,12 @@\n \"\"\"\n Django related class representing a config of an app\n \"\"\"\n+import logging\n+import sys\n+from django.conf import settings\n from django.apps import AppConfig\n \n+logger = logging.getLogger(__name__)\n \n class CmsConfig(AppConfig):\n \"\"\"\n@@ -10,3 +14,8 @@\n \"\"\"\n \n name = 'cms'\n+\n+ def ready(self):\n+ if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:\n+ logger.error(\"You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!\")\n+ sys.exit(1)\n", "issue": "Enforce new SECRET_KEY in production environments\nThe server should refuse to start if the SECRET_KEY does not differ from the default value in our repository.\r\n\r\nThe importance of this issue depends on whether the Federated Cloud feature is using this key or not.\n", "before_files": [{"content": "\"\"\"\nDjango related class representing a config of an app\n\"\"\"\nfrom django.apps import AppConfig\n\n\nclass CmsConfig(AppConfig):\n \"\"\"\n Class inheriting the django AppConfig\n \"\"\"\n\n name = 'cms'\n", "path": "backend/cms/apps.py"}]}
| 646 | 204 |
gh_patches_debug_1858
|
rasdani/github-patches
|
git_diff
|
huggingface__accelerate-127
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when loading optimizer state
Thanks for this awesome product!!
When I try to load optimizer state on TPUs, I get an error, since `self.device_placement` is never initialized in `AcceleratedOptimizer`!
https://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86
</issue>
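A condensed sketch of the failure mode (class and attribute names follow the snippet linked above, the bodies are simplified): `load_state_dict()` reads `self.device_placement`, but `__init__` never stores the constructor argument, so keeping it on the instance is what is missing.

```python
class AcceleratedOptimizerSketch:
    def __init__(self, optimizer, device_placement=True, scaler=None):
        self.optimizer = optimizer
        self.scaler = scaler
        self.device_placement = device_placement  # the attribute the report says is missing

    def load_state_dict(self, state_dict):
        if self.device_placement:
            # On TPU this is where the state dict would be sent to the device first.
            pass
        self.optimizer.load_state_dict(state_dict)
```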
<code>
[start of src/accelerate/optimizer.py]
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import torch
16
17 from packaging import version
18
19 from .state import AcceleratorState, DistributedType, is_tpu_available
20 from .utils import honor_type
21
22
23 if is_tpu_available():
24 import torch_xla.core.xla_model as xm
25
26
27 def move_to_device(state, device):
28 if isinstance(state, (list, tuple)):
29 return honor_type(state, (move_to_device(t, device) for t in state))
30 elif isinstance(state, dict):
31 return type(state)({k: move_to_device(v, device) for k, v in state.items()})
32 elif isinstance(state, torch.Tensor):
33 return state.to(device)
34 return state
35
36
37 class AcceleratedOptimizer(torch.optim.Optimizer):
38 """
39 Internal wrapper around a torch optimizer.
40
41 Args:
42 optimizer (:obj:`torch.optim.optimizer.Optimizer`):
43 The optimizer to wrap.
44 device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):
45 Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
46 :obj:`optimizer` on the right device.
47 scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):
48 The scaler to use in the step function if training with mixed precision.
49 """
50
51 def __init__(self, optimizer, device_placement=True, scaler=None):
52 self.optimizer = optimizer
53 self.scaler = scaler
54 self.state = AcceleratorState()
55
56 # Handle device placement
57 if device_placement:
58 state_dict = self.optimizer.state_dict()
59 if self.state.distributed_type == DistributedType.TPU:
60 xm.send_cpu_data_to_device(state_dict, self.state.device)
61 else:
62 state_dict = move_to_device(state_dict, self.state.device)
63 self.optimizer.load_state_dict(state_dict)
64
65 @property
66 def param_groups(self):
67 return self.optimizer.param_groups
68
69 @param_groups.setter
70 def param_groups(self, param_groups):
71 self.optimizer.param_groups = param_groups
72
73 @property
74 def defaults(self):
75 return self.optimizer.defaults
76
77 @defaults.setter
78 def defaults(self, defaults):
79 self.optimizer.defaults = defaults
80
81 def add_param_group(self, param_group):
82 self.optimizer.add_param_group(param_group)
83
84 def load_state_dict(self, state_dict):
85 if self.state.distributed_type == DistributedType.TPU and self.device_placement:
86 xm.send_cpu_data_to_device(state_dict, self.state.device)
87 self.optimizer.load_state_dict(state_dict)
88
89 def state_dict(self):
90 return self.optimizer.state_dict()
91
92 def zero_grad(self, set_to_none=None):
93 if version.parse(torch.__version__) < version.parse("1.7.0"):
94 if set_to_none is not None:
95 raise ValueError(
96 "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for "
97 f"earlier versions (found version {torch.__version__})."
98 )
99 self.optimizer.zero_grad()
100 else:
101 if set_to_none is not None:
102 set_to_none = False
103 self.optimizer.zero_grad(set_to_none=set_to_none)
104
105 def step(self, closure=None):
106 if self.state.distributed_type == DistributedType.TPU:
107 optimizer_args = {"closure": closure} if closure is not None else {}
108 xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
109 elif self.scaler is not None:
110 self.scaler.step(self.optimizer, closure)
111 self.scaler.update()
112 else:
113 self.optimizer.step(closure)
114
115 def _switch_parameters(self, parameters_map):
116 for param_group in self.optimizer.param_groups:
117 param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
118
119 @property
120 def is_overflow(self):
121 """This needs to be implemented at the end"""
122 return False # TODO: implement it
123
[end of src/accelerate/optimizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -52,6 +52,7 @@
self.optimizer = optimizer
self.scaler = scaler
self.state = AcceleratorState()
+ self.device_placement = device_placement
# Handle device placement
if device_placement:
|
{"golden_diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -52,6 +52,7 @@\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n+ self.device_placement = device_placement\n \n # Handle device placement\n if device_placement:\n", "issue": "Error when loading optimizer state\nThanks for this awesome product!!\r\n\r\nWhen I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..!\r\nhttps://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86\nError when loading optimizer state\nThanks for this awesome product!!\r\n\r\nWhen I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..!\r\nhttps://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom packaging import version\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\nfrom .utils import honor_type\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n if set_to_none is not None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n\n def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"This needs to be implemented at the end\"\"\"\n return False # TODO: implement it\n", "path": "src/accelerate/optimizer.py"}]}
| 1,983 | 93 |
gh_patches_debug_15427
|
rasdani/github-patches
|
git_diff
|
google__jax-574
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jax.config doesn't report command-line flags correctly
Another one from @jmgilmer and me - I don't think the jax config is parsing command-line flags correctly. I don't know if this is functionally important or just a reporting error - but it is certainly important for user scripts to know what flags have been set.
If I run this script:
```
from absl import app, flags
from jax.config import config
FLAGS = flags.FLAGS
def main(_):
print("FLAGS.jax_enable_x64", FLAGS.jax_enable_x64)
print("FLAGS.jax_debug_nans", FLAGS.jax_debug_nans)
print(config.values)
if __name__ == "__main__":
config.config_with_absl()
app.run(main)
```
I get the following problem: jax.config doesn't report the correct flag settings.
```
> python jaxtest.py --jax_enable_x64=1 --jax_debug_nans=1
FLAGS.jax_enable_x64 True
FLAGS.jax_debug_nans True
{'jax_enable_x64': 0, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 0, 'jax_disable_jit': 0}
```
If I run the same with envvars instead, it works:
```
JAX_ENABLE_X64=1 JAX_DEBUG_NANS=1 python jaxtest.py
FLAGS.jax_enable_x64 True
FLAGS.jax_debug_nans True
{'jax_enable_x64': 1, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 1, 'jax_disable_jit': 0}
```
I've tried parsing the flags in different ways but nothing seems to fix the issue.
</issue>
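A workaround sketch for the reporting mismatch (this is not the upstream fix, just a way to make both views agree): after absl has parsed the command line, copy the parsed values back into `config.values`.

```python
from absl import app, flags
from jax.config import config

FLAGS = flags.FLAGS


def main(_):
    # Mirror the parsed absl flags back into the plain dict behind config.values.
    for name in list(config.values):
        config.update(name, getattr(FLAGS, name))
    print(config.values)  # now matches --jax_enable_x64=1 --jax_debug_nans=1


if __name__ == "__main__":
    config.config_with_absl()
    app.run(main)
```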
<code>
[start of jax/config.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys
16
17
18 class Config(object):
19 def __init__(self):
20 self.values = {}
21 self.meta = {}
22 self.FLAGS = NameSpace(self.read)
23 self.use_absl = False
24
25 def update(self, name, val):
26 self.check_exists(name)
27 if name not in self.values:
28 raise Exception("Unrecognized config option: {}".format(name))
29 self.values[name] = val
30
31 def read(self, name):
32 if self.use_absl:
33 return getattr(self.absl_flags.FLAGS, name)
34 else:
35 self.check_exists(name)
36 return self.values[name]
37
38 def add_option(self, name, default, opt_type, meta_args, meta_kwargs):
39 if name in self.values:
40 raise Exception("Config option {} already defined".format(name))
41 self.values[name] = default
42 self.meta[name] = (opt_type, meta_args, meta_kwargs)
43
44 def check_exists(self, name):
45 if name not in self.values:
46 raise Exception("Unrecognized config option: {}".format(name))
47
48 def DEFINE_bool(self, name, default, *args, **kwargs):
49 self.add_option(name, default, bool, args, kwargs)
50
51 def DEFINE_integer(self, name, default, *args, **kwargs):
52 self.add_option(name, default, int, args, kwargs)
53
54 def DEFINE_string(self, name, default, *args, **kwargs):
55 self.add_option(name, default, str, args, kwargs)
56
57 def DEFINE_enum(self, name, default, *args, **kwargs):
58 self.add_option(name, default, 'enum', args, kwargs)
59
60 def config_with_absl(self):
61 # Run this before calling `app.run(main)` etc
62 import absl.flags as absl_FLAGS
63 from absl import app, flags as absl_flags
64
65 self.use_absl = True
66 self.absl_flags = absl_flags
67 absl_defs = { bool: absl_flags.DEFINE_bool,
68 int: absl_flags.DEFINE_integer,
69 str: absl_flags.DEFINE_string,
70 'enum': absl_flags.DEFINE_enum }
71
72 for name, val in self.values.items():
73 flag_type, meta_args, meta_kwargs = self.meta[name]
74 absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)
75
76 def complete_absl_config(self, absl_flags):
77 for name, _ in self.values.items():
78 self.update(name, getattr(absl_flags.FLAGS, name))
79
80 def parse_flags_with_absl(self):
81 global already_configured_with_absl
82 if not already_configured_with_absl:
83 import absl.flags
84 self.config_with_absl()
85 absl.flags.FLAGS(sys.argv)
86 already_configured_with_absl = True
87
88
89 class NameSpace(object):
90 def __init__(self, getter):
91 self._getter = getter
92
93 def __getattr__(self, name):
94 return self._getter(name)
95
96
97 config = Config()
98 flags = config
99 already_configured_with_absl = False
100
[end of jax/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jax/config.py b/jax/config.py
--- a/jax/config.py
+++ b/jax/config.py
@@ -73,6 +73,8 @@
flag_type, meta_args, meta_kwargs = self.meta[name]
absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)
+ app.call_after_init(lambda: self.complete_absl_config(absl_flags))
+
def complete_absl_config(self, absl_flags):
for name, _ in self.values.items():
self.update(name, getattr(absl_flags.FLAGS, name))
@@ -83,6 +85,7 @@
import absl.flags
self.config_with_absl()
absl.flags.FLAGS(sys.argv)
+ self.complete_absl_config(absl.flags)
already_configured_with_absl = True
|
{"golden_diff": "diff --git a/jax/config.py b/jax/config.py\n--- a/jax/config.py\n+++ b/jax/config.py\n@@ -73,6 +73,8 @@\n flag_type, meta_args, meta_kwargs = self.meta[name]\n absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)\n \n+ app.call_after_init(lambda: self.complete_absl_config(absl_flags))\n+\n def complete_absl_config(self, absl_flags):\n for name, _ in self.values.items():\n self.update(name, getattr(absl_flags.FLAGS, name))\n@@ -83,6 +85,7 @@\n import absl.flags\n self.config_with_absl()\n absl.flags.FLAGS(sys.argv)\n+ self.complete_absl_config(absl.flags)\n already_configured_with_absl = True\n", "issue": "jax.config doesn't report command-line flags correctly\nAnother one from @jmgilmer and I - I don't think the jax config is parsing command line flags correctly. I don't know if this is functionally important or just a reporting error - but it is certainly important for user scripts knowing what flags have been set.\r\n\r\nIf I run this script:\r\n```\r\nfrom absl import app, flags\r\nfrom jax.config import config\r\nFLAGS = flags.FLAGS\r\ndef main(_):\r\n print(\"FLAGS.jax_enable_x64\", FLAGS.jax_enable_x64)\r\n print(\"FLAGS.jax_debug_nans\", FLAGS.jax_debug_nans)\r\n print(config.values)\r\nif __name__ == \"__main__\":\r\n config.config_with_absl()\r\n app.run(main)\r\n```\r\nI get the following problem: jax.config doesn't report the correct flag settings.\r\n```\r\n> python jaxtest.py --jax_enable_x64=1 --jax_debug_nans=1\r\nFLAGS.jax_enable_x64 True\r\nFLAGS.jax_debug_nans True\r\n{'jax_enable_x64': 0, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 0, 'jax_disable_jit': 0}\r\n```\r\nif I run the same with envvars instead it works:\r\n```\r\nJAX_ENABLE_X64=1 JAX_DEBUG_NANS=1 python jaxtest.py\r\nFLAGS.jax_enable_x64 True\r\nFLAGS.jax_debug_nans True\r\n{'jax_enable_x64': 1, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 1, 'jax_disable_jit': 0}\r\n```\r\n\r\nI've tried parsing the flags in different ways but nothing seems to fix the issue.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\n\nclass Config(object):\n def __init__(self):\n self.values = {}\n self.meta = {}\n self.FLAGS = NameSpace(self.read)\n self.use_absl = False\n\n def update(self, name, val):\n self.check_exists(name)\n if name not in self.values:\n raise Exception(\"Unrecognized config option: {}\".format(name))\n self.values[name] = val\n\n def read(self, name):\n if self.use_absl:\n return getattr(self.absl_flags.FLAGS, name)\n else:\n self.check_exists(name)\n return self.values[name]\n\n def add_option(self, name, default, opt_type, meta_args, meta_kwargs):\n if name in self.values:\n raise Exception(\"Config option {} already defined\".format(name))\n self.values[name] = default\n self.meta[name] = (opt_type, meta_args, meta_kwargs)\n\n def check_exists(self, 
name):\n if name not in self.values:\n raise Exception(\"Unrecognized config option: {}\".format(name))\n\n def DEFINE_bool(self, name, default, *args, **kwargs):\n self.add_option(name, default, bool, args, kwargs)\n\n def DEFINE_integer(self, name, default, *args, **kwargs):\n self.add_option(name, default, int, args, kwargs)\n\n def DEFINE_string(self, name, default, *args, **kwargs):\n self.add_option(name, default, str, args, kwargs)\n\n def DEFINE_enum(self, name, default, *args, **kwargs):\n self.add_option(name, default, 'enum', args, kwargs)\n\n def config_with_absl(self):\n # Run this before calling `app.run(main)` etc\n import absl.flags as absl_FLAGS\n from absl import app, flags as absl_flags\n\n self.use_absl = True\n self.absl_flags = absl_flags\n absl_defs = { bool: absl_flags.DEFINE_bool,\n int: absl_flags.DEFINE_integer,\n str: absl_flags.DEFINE_string,\n 'enum': absl_flags.DEFINE_enum }\n\n for name, val in self.values.items():\n flag_type, meta_args, meta_kwargs = self.meta[name]\n absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)\n\n def complete_absl_config(self, absl_flags):\n for name, _ in self.values.items():\n self.update(name, getattr(absl_flags.FLAGS, name))\n\n def parse_flags_with_absl(self):\n global already_configured_with_absl\n if not already_configured_with_absl:\n import absl.flags\n self.config_with_absl()\n absl.flags.FLAGS(sys.argv)\n already_configured_with_absl = True\n\n\nclass NameSpace(object):\n def __init__(self, getter):\n self._getter = getter\n\n def __getattr__(self, name):\n return self._getter(name)\n\n\nconfig = Config()\nflags = config\nalready_configured_with_absl = False\n", "path": "jax/config.py"}]}
| 1,917 | 180 |
gh_patches_debug_15851
|
rasdani/github-patches
|
git_diff
|
lk-geimfari__mimesis-875
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add pricetag support
# Feature request
## Thesis
Create a localized pricetag:
```
>>> mimesis.Business().pricetag()
"$ 12.34"
>>> mimesis.Business("de").pricetag(minimum=1000, maximum=1500)
"1.234,56 €"
>>> mimesis.Business("cs").pricetag()
"75,20 Kč" # rounded to 0.1
```
with defaults minimum=0.01 and maximum=1000.00 as a proposal for the default provider. Localized providers would define their own values because of the huge differences between the values in use, such as USD/EUR/GBP going down to 0.01 while some minor currencies use millions as a base.
## Reasoning
This is very locale dependent. Currency is already in the `CURRENCY_SYMBOLS`, but what would be the best place for localized number and price formatting as well as the localized defaults for minimum/maximum?
</issue>
<code>
[start of mimesis/providers/business.py]
1 # -*- coding: utf-8 -*-
2
3 """Business data provider."""
4
5 from mimesis.data import (
6 CRYPTOCURRENCY_ISO_CODES,
7 CRYPTOCURRENCY_SYMBOLS,
8 CURRENCY_ISO_CODES,
9 CURRENCY_SYMBOLS,
10 )
11 from mimesis.providers.base import BaseDataProvider
12
13 __all__ = ['Business']
14
15
16 class Business(BaseDataProvider):
17 """Class for generating data for business."""
18
19 def __init__(self, *args, **kwargs):
20 """Initialize attributes.
21
22 :param locale: Current locale.
23 """
24 super().__init__(*args, **kwargs)
25 self._datafile = 'business.json'
26 self.pull(self._datafile)
27
28 class Meta:
29 """Class for metadata."""
30
31 name = 'business'
32
33 def company(self) -> str:
34 """Get a random company name.
35
36 :return: Company name.
37 """
38 return self.random.choice(self._data['company']['name'])
39
40 def company_type(self, abbr: bool = False) -> str:
41 """Get a random type of business entity.
42
43 :param abbr: Abbreviated company type.
44 :return: Types of business entity.
45 """
46 key = 'abbr' if abbr else 'title'
47 return self.random.choice(
48 self._data['company']['type'][key],
49 )
50
51 def copyright(self) -> str: # noqa: A003
52 """Generate a random copyright.
53
54 :return: Copyright of company.
55 """
56 return '© {}, {}'.format(
57 self.company(),
58 self.company_type(abbr=True),
59 )
60
61 def currency_iso_code(self, allow_random: bool = False) -> str:
62 """Get code of the currency for current locale.
63
64 :param allow_random: Get a random ISO code.
65 :return: Currency code.
66 """
67 if allow_random:
68 return self.random.choice(CURRENCY_ISO_CODES)
69 else:
70 return self._data['currency-code']
71
72 def cryptocurrency_iso_code(self) -> str:
73 """Get symbol of random cryptocurrency.
74
75 :return: Symbol of cryptocurrency.
76 """
77 return self.random.choice(CRYPTOCURRENCY_ISO_CODES)
78
79 def currency_symbol(self):
80 """Get a currency symbol for current locale.
81
82 :return: Currency symbol.
83 """
84 return CURRENCY_SYMBOLS[self.locale]
85
86 def cryptocurrency_symbol(self) -> str:
87 """Get a cryptocurrency symbol.
88
89 :return: Symbol of cryptocurrency.
90 """
91 return self.random.choice(CRYPTOCURRENCY_SYMBOLS)
92
93 def price(self, minimum: float = 10.00,
94 maximum: float = 1000.00) -> str:
95 """Generate a random price.
96
97 :param minimum: Max value of price.
98 :param maximum: Min value of price.
99 :return: Price.
100 """
101 price = self.random.uniform(minimum, maximum, precision=2)
102 return '{0} {1}'.format(price, self.currency_symbol())
103
104 def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
105 """Generate random price in BTC.
106
107 :param minimum: Minimum value of price.
108 :param maximum: Maximum value of price.
109 :return: Price in BTC.
110 """
111 return '{} BTC'.format(
112 self.random.uniform(
113 minimum,
114 maximum,
115 precision=7,
116 ),
117 )
118
[end of mimesis/providers/business.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mimesis/providers/business.py b/mimesis/providers/business.py
--- a/mimesis/providers/business.py
+++ b/mimesis/providers/business.py
@@ -98,8 +98,19 @@
:param maximum: Min value of price.
:return: Price.
"""
- price = self.random.uniform(minimum, maximum, precision=2)
- return '{0} {1}'.format(price, self.currency_symbol())
+ price_format = self._data['price-format']
+ numeric_frac_digits = self._data['numeric-frac-digits']
+ delims = {
+ '.': self._data['numeric-decimal'],
+ ',': self._data['numeric-thousands'],
+ }
+
+ value = self.random.uniform(minimum, maximum)
+ price = '{:,.{}f}'.format(value, numeric_frac_digits)
+
+ price = ''.join(delims.get(char, char) for char in price)
+
+ return price_format.replace('#', price)
def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
"""Generate random price in BTC.
|
{"golden_diff": "diff --git a/mimesis/providers/business.py b/mimesis/providers/business.py\n--- a/mimesis/providers/business.py\n+++ b/mimesis/providers/business.py\n@@ -98,8 +98,19 @@\n :param maximum: Min value of price.\n :return: Price.\n \"\"\"\n- price = self.random.uniform(minimum, maximum, precision=2)\n- return '{0} {1}'.format(price, self.currency_symbol())\n+ price_format = self._data['price-format']\n+ numeric_frac_digits = self._data['numeric-frac-digits']\n+ delims = {\n+ '.': self._data['numeric-decimal'],\n+ ',': self._data['numeric-thousands'],\n+ }\n+\n+ value = self.random.uniform(minimum, maximum)\n+ price = '{:,.{}f}'.format(value, numeric_frac_digits)\n+\n+ price = ''.join(delims.get(char, char) for char in price)\n+\n+ return price_format.replace('#', price)\n \n def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:\n \"\"\"Generate random price in BTC.\n", "issue": "Add pricetag support\n# Feature request\r\n## Thesis\r\n\r\nCreate a localized pricetag:\r\n\r\n```\r\n>>> mimesis.Business().pricetag()\r\n\"$ 12.34\"\r\n>>> mimesis.Business(\"de\").pricetag(minimum=1000, maximum=1500)\r\n\"1.234,56 \u20ac\"\r\n>>> mimesis.Business(\"cs\").pricetag()\r\n\"75,20 K\u010d\" # rounded to 0.1\r\n```\r\nwith defaults minimum=0.01 and maximum=1000.00 as a proposal for the default provider. Localized providers would define their own values, because of huge differences between the used values, such as USD/EUR/GBP going down to 0.01, and some minor currencies using millions as base.\r\n\r\n## Reasoning\r\n\r\nThis is very locale dependent. Currency is already in the `CURRENCY_SYMBOLS`, but what would be the best place for localized number and price formatting as well as the localized defaults for minimum/maximum?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Business data provider.\"\"\"\n\nfrom mimesis.data import (\n CRYPTOCURRENCY_ISO_CODES,\n CRYPTOCURRENCY_SYMBOLS,\n CURRENCY_ISO_CODES,\n CURRENCY_SYMBOLS,\n)\nfrom mimesis.providers.base import BaseDataProvider\n\n__all__ = ['Business']\n\n\nclass Business(BaseDataProvider):\n \"\"\"Class for generating data for business.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param locale: Current locale.\n \"\"\"\n super().__init__(*args, **kwargs)\n self._datafile = 'business.json'\n self.pull(self._datafile)\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'business'\n\n def company(self) -> str:\n \"\"\"Get a random company name.\n\n :return: Company name.\n \"\"\"\n return self.random.choice(self._data['company']['name'])\n\n def company_type(self, abbr: bool = False) -> str:\n \"\"\"Get a random type of business entity.\n\n :param abbr: Abbreviated company type.\n :return: Types of business entity.\n \"\"\"\n key = 'abbr' if abbr else 'title'\n return self.random.choice(\n self._data['company']['type'][key],\n )\n\n def copyright(self) -> str: # noqa: A003\n \"\"\"Generate a random copyright.\n\n :return: Copyright of company.\n \"\"\"\n return '\u00a9 {}, {}'.format(\n self.company(),\n self.company_type(abbr=True),\n )\n\n def currency_iso_code(self, allow_random: bool = False) -> str:\n \"\"\"Get code of the currency for current locale.\n\n :param allow_random: Get a random ISO code.\n :return: Currency code.\n \"\"\"\n if allow_random:\n return self.random.choice(CURRENCY_ISO_CODES)\n else:\n return self._data['currency-code']\n\n def cryptocurrency_iso_code(self) -> str:\n \"\"\"Get symbol of random cryptocurrency.\n\n :return: Symbol 
of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_ISO_CODES)\n\n def currency_symbol(self):\n \"\"\"Get a currency symbol for current locale.\n\n :return: Currency symbol.\n \"\"\"\n return CURRENCY_SYMBOLS[self.locale]\n\n def cryptocurrency_symbol(self) -> str:\n \"\"\"Get a cryptocurrency symbol.\n\n :return: Symbol of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_SYMBOLS)\n\n def price(self, minimum: float = 10.00,\n maximum: float = 1000.00) -> str:\n \"\"\"Generate a random price.\n\n :param minimum: Max value of price.\n :param maximum: Min value of price.\n :return: Price.\n \"\"\"\n price = self.random.uniform(minimum, maximum, precision=2)\n return '{0} {1}'.format(price, self.currency_symbol())\n\n def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:\n \"\"\"Generate random price in BTC.\n\n :param minimum: Minimum value of price.\n :param maximum: Maximum value of price.\n :return: Price in BTC.\n \"\"\"\n return '{} BTC'.format(\n self.random.uniform(\n minimum,\n maximum,\n precision=7,\n ),\n )\n", "path": "mimesis/providers/business.py"}]}
| 1,736 | 254 |
gh_patches_debug_26099
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-1044
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gpu_info crashes because it cannot parse "N/A"
## 🐛 Bug description
When trying to use gpu_info, it throws:
```
File "/home/blackhc/anaconda3/envs/hello-mnist/lib/python3.7/site-packages/ignite/contrib/metrics/gpu_info.py", line 91, in completed
engine.state.metrics[util_name] = int(util_report['gpu_util'])
ValueError: invalid literal for int() with base 10: 'N/A'
```
There is error handling code above it, but it does not catch the issue ("N/A" is returned).
I assume my GPU does not support it. However, it would be neat to have a graceful failure mode.
Thank you!
Andreas
## Environment
torch 1.5 on a GTX 780 TI (source)
ignite 0.3.0 (conda)
pynvml 8.0.4 (pip)
</issue>
<code>
[start of ignite/contrib/metrics/gpu_info.py]
1 # -*- coding: utf-8 -*-
2 import warnings
3
4 import torch
5
6 from ignite.engine import Events
7 from ignite.metrics import Metric
8
9
10 class GpuInfo(Metric):
11 """Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
12 on each iterations.
13
14 Examples:
15
16 .. code-block:: python
17
18 # Default GPU measurements
19 GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'
20
21 # Logging with TQDM
22 ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])
23 # Progress bar will looks like
24 # Epoch [2/10]: [12/24] 50%|█████ , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]
25
26 # Logging with Tensorboard
27 tb_logger.attach(trainer,
28 log_handler=OutputHandler(tag="training", metric_names='all'),
29 event_name=Events.ITERATION_COMPLETED)
30 """
31
32 def __init__(self):
33 try:
34 import pynvml
35 except ImportError:
36 raise RuntimeError(
37 "This contrib module requires pynvml to be installed. "
38 "Please install it with command: \n pip install pynvml"
39 )
40 # Let's check available devices
41 if not torch.cuda.is_available():
42 raise RuntimeError("This contrib module requires available GPU")
43
44 from pynvml.smi import nvidia_smi
45
46 # Let it fail if no libnvidia drivers or NMVL library found
47 self.nvsmi = nvidia_smi.getInstance()
48 super(GpuInfo, self).__init__()
49
50 def reset(self):
51 pass
52
53 def update(self, output):
54 pass
55
56 def compute(self):
57 data = self.nvsmi.DeviceQuery("memory.used, memory.total, utilization.gpu")
58 if len(data) == 0 or ("gpu" not in data):
59 warnings.warn("No GPU information available")
60 return []
61 return data["gpu"]
62
63 def completed(self, engine, name):
64 data = self.compute()
65 if len(data) < 1:
66 warnings.warn("No GPU information available")
67 return
68
69 for i, data_by_rank in enumerate(data):
70 mem_name = "{}:{} mem(%)".format(name, i)
71
72 if "fb_memory_usage" not in data_by_rank:
73 warnings.warn("No GPU memory usage information available in {}".format(data_by_rank))
74 continue
75 mem_report = data_by_rank["fb_memory_usage"]
76 if not ("used" in mem_report and "total" in mem_report):
77 warnings.warn(
78 "GPU memory usage information does not provide used/total "
79 "memory consumption information in {}".format(mem_report)
80 )
81 continue
82
83 engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"])
84
85 for i, data_by_rank in enumerate(data):
86 util_name = "{}:{} util(%)".format(name, i)
87 if "utilization" not in data_by_rank:
88 warnings.warn("No GPU utilization information available in {}".format(data_by_rank))
89 continue
90 util_report = data_by_rank["utilization"]
91 if not ("gpu_util" in util_report):
92 warnings.warn(
93 "GPU utilization information does not provide 'gpu_util' information in " "{}".format(util_report)
94 )
95 continue
96
97 engine.state.metrics[util_name] = int(util_report["gpu_util"])
98
99 def attach(self, engine, name="gpu", event_name=Events.ITERATION_COMPLETED):
100 engine.add_event_handler(event_name, self.completed, name)
101
[end of ignite/contrib/metrics/gpu_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/contrib/metrics/gpu_info.py b/ignite/contrib/metrics/gpu_info.py
--- a/ignite/contrib/metrics/gpu_info.py
+++ b/ignite/contrib/metrics/gpu_info.py
@@ -11,6 +11,10 @@
"""Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
on each iterations.
+ .. Note ::
+
+ In case if gpu utilization reports "N/A" on a given GPU, corresponding metric value is not set.
+
Examples:
.. code-block:: python
@@ -90,11 +94,14 @@
util_report = data_by_rank["utilization"]
if not ("gpu_util" in util_report):
warnings.warn(
- "GPU utilization information does not provide 'gpu_util' information in " "{}".format(util_report)
+ "GPU utilization information does not provide 'gpu_util' information in {}".format(util_report)
)
continue
-
- engine.state.metrics[util_name] = int(util_report["gpu_util"])
+ try:
+ engine.state.metrics[util_name] = int(util_report["gpu_util"])
+ except ValueError:
+ # Do not set GPU utilization information
+ pass
def attach(self, engine, name="gpu", event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(event_name, self.completed, name)
|
{"golden_diff": "diff --git a/ignite/contrib/metrics/gpu_info.py b/ignite/contrib/metrics/gpu_info.py\n--- a/ignite/contrib/metrics/gpu_info.py\n+++ b/ignite/contrib/metrics/gpu_info.py\n@@ -11,6 +11,10 @@\n \"\"\"Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric\n on each iterations.\n \n+ .. Note ::\n+\n+ In case if gpu utilization reports \"N/A\" on a given GPU, corresponding metric value is not set.\n+\n Examples:\n \n .. code-block:: python\n@@ -90,11 +94,14 @@\n util_report = data_by_rank[\"utilization\"]\n if not (\"gpu_util\" in util_report):\n warnings.warn(\n- \"GPU utilization information does not provide 'gpu_util' information in \" \"{}\".format(util_report)\n+ \"GPU utilization information does not provide 'gpu_util' information in {}\".format(util_report)\n )\n continue\n-\n- engine.state.metrics[util_name] = int(util_report[\"gpu_util\"])\n+ try:\n+ engine.state.metrics[util_name] = int(util_report[\"gpu_util\"])\n+ except ValueError:\n+ # Do not set GPU utilization information\n+ pass\n \n def attach(self, engine, name=\"gpu\", event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(event_name, self.completed, name)\n", "issue": "gpu_info crashes because it cannot parse \"N/A\"\n## \ud83d\udc1b Bug description\r\n\r\nWhen trying to use gpu_info, it throws:\r\n\r\n```\r\n File \"/home/blackhc/anaconda3/envs/hello-mnist/lib/python3.7/site-packages/ignite/contrib/metrics/gpu_info.py\", line 91, in completed\r\n engine.state.metrics[util_name] = int(util_report['gpu_util'])\r\nValueError: invalid literal for int() with base 10: 'N/A'\r\n```\r\n\r\nThere is error handling code above it, but it does not catch the issue (\"N/A\" is returned).\r\n\r\nI assume my GPU does not support it. However, it would be neat to have a graceful failure mode.\r\n\r\nThank you!\r\nAndreas\r\n\r\n## Environment\r\n\r\ntorch 1.5 on a GTX 780 TI (source)\r\nignite 0.3.0 (conda)\r\npynvml 8.0.4 (pip)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.metrics import Metric\n\n\nclass GpuInfo(Metric):\n \"\"\"Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric\n on each iterations.\n\n Examples:\n\n .. code-block:: python\n\n # Default GPU measurements\n GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'\n\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])\n # Progress bar will looks like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]\n\n # Logging with Tensorboard\n tb_logger.attach(trainer,\n log_handler=OutputHandler(tag=\"training\", metric_names='all'),\n event_name=Events.ITERATION_COMPLETED)\n \"\"\"\n\n def __init__(self):\n try:\n import pynvml\n except ImportError:\n raise RuntimeError(\n \"This contrib module requires pynvml to be installed. 
\"\n \"Please install it with command: \\n pip install pynvml\"\n )\n # Let's check available devices\n if not torch.cuda.is_available():\n raise RuntimeError(\"This contrib module requires available GPU\")\n\n from pynvml.smi import nvidia_smi\n\n # Let it fail if no libnvidia drivers or NMVL library found\n self.nvsmi = nvidia_smi.getInstance()\n super(GpuInfo, self).__init__()\n\n def reset(self):\n pass\n\n def update(self, output):\n pass\n\n def compute(self):\n data = self.nvsmi.DeviceQuery(\"memory.used, memory.total, utilization.gpu\")\n if len(data) == 0 or (\"gpu\" not in data):\n warnings.warn(\"No GPU information available\")\n return []\n return data[\"gpu\"]\n\n def completed(self, engine, name):\n data = self.compute()\n if len(data) < 1:\n warnings.warn(\"No GPU information available\")\n return\n\n for i, data_by_rank in enumerate(data):\n mem_name = \"{}:{} mem(%)\".format(name, i)\n\n if \"fb_memory_usage\" not in data_by_rank:\n warnings.warn(\"No GPU memory usage information available in {}\".format(data_by_rank))\n continue\n mem_report = data_by_rank[\"fb_memory_usage\"]\n if not (\"used\" in mem_report and \"total\" in mem_report):\n warnings.warn(\n \"GPU memory usage information does not provide used/total \"\n \"memory consumption information in {}\".format(mem_report)\n )\n continue\n\n engine.state.metrics[mem_name] = int(mem_report[\"used\"] * 100.0 / mem_report[\"total\"])\n\n for i, data_by_rank in enumerate(data):\n util_name = \"{}:{} util(%)\".format(name, i)\n if \"utilization\" not in data_by_rank:\n warnings.warn(\"No GPU utilization information available in {}\".format(data_by_rank))\n continue\n util_report = data_by_rank[\"utilization\"]\n if not (\"gpu_util\" in util_report):\n warnings.warn(\n \"GPU utilization information does not provide 'gpu_util' information in \" \"{}\".format(util_report)\n )\n continue\n\n engine.state.metrics[util_name] = int(util_report[\"gpu_util\"])\n\n def attach(self, engine, name=\"gpu\", event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(event_name, self.completed, name)\n", "path": "ignite/contrib/metrics/gpu_info.py"}]}
| 1,771 | 312 |
gh_patches_debug_30950
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-2508
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
linux.wait_for_ssh action improvements
Currently the `linux.wait_for_ssh` action (https://github.com/StackStorm/st2/blob/master/contrib/linux/actions/wait_for_ssh.yaml) only supports authenticating with an RSA SSH key.
There are multiple ways we can improve this action and make it more generic.
- Support for other key types
- Support for password based authentication
- Refactor the action to utilize the new Paramiko SSH client
- Default to using system user (stanley) username and key file. Ideally key file location should be read from the config
</issue>
<code>
[start of contrib/linux/actions/wait_for_ssh.py]
1 #!/usr/bin/env python
2
3 import time
4
5 import paramiko
6
7 from st2actions.runners.pythonrunner import Action
8
9
10 class BaseAction(Action):
11 def run(self, keyfile, username, hostname, ssh_timeout, retries):
12 key = paramiko.RSAKey.from_private_key_file(keyfile)
13 client = paramiko.SSHClient()
14 client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
15
16 for index in range(retries):
17 attempt = index + 1
18
19 try:
20 self.logger.debug('SSH connection attempt: %s' % (attempt))
21 client.connect(hostname=hostname, username=username, pkey=key)
22 return True
23 except Exception as e:
24 self.logger.info('Attempt %s failed (%s), sleeping...' % (attempt, str(e)))
25 time.sleep(ssh_timeout)
26
27 raise Exception('Exceeded max retries (%s)' % (retries))
28
[end of contrib/linux/actions/wait_for_ssh.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/contrib/linux/actions/wait_for_ssh.py b/contrib/linux/actions/wait_for_ssh.py
--- a/contrib/linux/actions/wait_for_ssh.py
+++ b/contrib/linux/actions/wait_for_ssh.py
@@ -2,26 +2,36 @@
import time
-import paramiko
+from oslo_config import cfg
from st2actions.runners.pythonrunner import Action
+from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient
class BaseAction(Action):
- def run(self, keyfile, username, hostname, ssh_timeout, retries):
- key = paramiko.RSAKey.from_private_key_file(keyfile)
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ def run(self, hostname, port, username, password=None, keyfile=None, ssh_timeout=5,
+ sleep_delay=20, retries=10):
+ # Note: If neither password nor key file is provided, we try to use system user
+ # key file
+ if not password and not keyfile:
+ keyfile = cfg.CONF.system_user.ssh_key_file
+ self.logger.info('Neither "password" nor "keyfile" parameter provided, '
+ 'defaulting to using "%s" key file' % (keyfile))
+
+ client = ParamikoSSHClient(hostname=hostname, port=port, username=username,
+ password=password, key_files=keyfile,
+ timeout=ssh_timeout)
for index in range(retries):
attempt = index + 1
try:
self.logger.debug('SSH connection attempt: %s' % (attempt))
- client.connect(hostname=hostname, username=username, pkey=key)
+ client.connect()
return True
except Exception as e:
- self.logger.info('Attempt %s failed (%s), sleeping...' % (attempt, str(e)))
- time.sleep(ssh_timeout)
+ self.logger.info('Attempt %s failed (%s), sleeping for %s seconds...' %
+ (attempt, str(e), sleep_delay))
+ time.sleep(sleep_delay)
raise Exception('Exceeded max retries (%s)' % (retries))
|
{"golden_diff": "diff --git a/contrib/linux/actions/wait_for_ssh.py b/contrib/linux/actions/wait_for_ssh.py\n--- a/contrib/linux/actions/wait_for_ssh.py\n+++ b/contrib/linux/actions/wait_for_ssh.py\n@@ -2,26 +2,36 @@\n \n import time\n \n-import paramiko\n+from oslo_config import cfg\n \n from st2actions.runners.pythonrunner import Action\n+from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient\n \n \n class BaseAction(Action):\n- def run(self, keyfile, username, hostname, ssh_timeout, retries):\n- key = paramiko.RSAKey.from_private_key_file(keyfile)\n- client = paramiko.SSHClient()\n- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n+ def run(self, hostname, port, username, password=None, keyfile=None, ssh_timeout=5,\n+ sleep_delay=20, retries=10):\n+ # Note: If neither password nor key file is provided, we try to use system user\n+ # key file\n+ if not password and not keyfile:\n+ keyfile = cfg.CONF.system_user.ssh_key_file\n+ self.logger.info('Neither \"password\" nor \"keyfile\" parameter provided, '\n+ 'defaulting to using \"%s\" key file' % (keyfile))\n+\n+ client = ParamikoSSHClient(hostname=hostname, port=port, username=username,\n+ password=password, key_files=keyfile,\n+ timeout=ssh_timeout)\n \n for index in range(retries):\n attempt = index + 1\n \n try:\n self.logger.debug('SSH connection attempt: %s' % (attempt))\n- client.connect(hostname=hostname, username=username, pkey=key)\n+ client.connect()\n return True\n except Exception as e:\n- self.logger.info('Attempt %s failed (%s), sleeping...' % (attempt, str(e)))\n- time.sleep(ssh_timeout)\n+ self.logger.info('Attempt %s failed (%s), sleeping for %s seconds...' %\n+ (attempt, str(e), sleep_delay))\n+ time.sleep(sleep_delay)\n \n raise Exception('Exceeded max retries (%s)' % (retries))\n", "issue": "linux.wait_for_ssh action improvements\nCurrently `linux.wait_for_ssh` action (https://github.com/StackStorm/st2/blob/master/contrib/linux/actions/wait_for_ssh.yaml) only supports authenticating with RSA SSH key.\n\nThere are multiple ways we can improve this action and make it more generic.\n- Support for other key types\n- Support for password based authentication\n- Refactor the action to utilize the new Paramiko SSH client\n- Default to using system user (stanley) username and key file. Ideally key file location should be read from the config\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport time\n\nimport paramiko\n\nfrom st2actions.runners.pythonrunner import Action\n\n\nclass BaseAction(Action):\n def run(self, keyfile, username, hostname, ssh_timeout, retries):\n key = paramiko.RSAKey.from_private_key_file(keyfile)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n for index in range(retries):\n attempt = index + 1\n\n try:\n self.logger.debug('SSH connection attempt: %s' % (attempt))\n client.connect(hostname=hostname, username=username, pkey=key)\n return True\n except Exception as e:\n self.logger.info('Attempt %s failed (%s), sleeping...' % (attempt, str(e)))\n time.sleep(ssh_timeout)\n\n raise Exception('Exceeded max retries (%s)' % (retries))\n", "path": "contrib/linux/actions/wait_for_ssh.py"}]}
| 894 | 477 |
gh_patches_debug_31670
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-2578
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Certificates not created with `mkcert` or `trustme` raise a RuntimeError
The `CertLoader` class in `sanic-org/sanic/sanic/worker/loader.py` checks the creator of the certificate. If the creator is not `mkcert` or `trustme` then it raises a `RuntimeError`. This will prevent Sanic from running with certificates from any other sources.
</issue>
<code>
[start of sanic/worker/loader.py]
1 from __future__ import annotations
2
3 import os
4 import sys
5
6 from importlib import import_module
7 from pathlib import Path
8 from typing import (
9 TYPE_CHECKING,
10 Any,
11 Callable,
12 Dict,
13 Optional,
14 Type,
15 Union,
16 cast,
17 )
18
19 from sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator
20
21
22 if TYPE_CHECKING:
23 from sanic import Sanic as SanicApp
24
25
26 class AppLoader:
27 def __init__(
28 self,
29 module_input: str = "",
30 as_factory: bool = False,
31 as_simple: bool = False,
32 args: Any = None,
33 factory: Optional[Callable[[], SanicApp]] = None,
34 ) -> None:
35 self.module_input = module_input
36 self.module_name = ""
37 self.app_name = ""
38 self.as_factory = as_factory
39 self.as_simple = as_simple
40 self.args = args
41 self.factory = factory
42 self.cwd = os.getcwd()
43
44 if module_input:
45 delimiter = ":" if ":" in module_input else "."
46 if module_input.count(delimiter):
47 module_name, app_name = module_input.rsplit(delimiter, 1)
48 self.module_name = module_name
49 self.app_name = app_name
50 if self.app_name.endswith("()"):
51 self.as_factory = True
52 self.app_name = self.app_name[:-2]
53
54 def load(self) -> SanicApp:
55 module_path = os.path.abspath(self.cwd)
56 if module_path not in sys.path:
57 sys.path.append(module_path)
58
59 if self.factory:
60 return self.factory()
61 else:
62 from sanic.app import Sanic
63 from sanic.simple import create_simple_server
64
65 if self.as_simple:
66 path = Path(self.module_input)
67 app = create_simple_server(path)
68 else:
69 if self.module_name == "" and os.path.isdir(self.module_input):
70 raise ValueError(
71 "App not found.\n"
72 " Please use --simple if you are passing a "
73 "directory to sanic.\n"
74 f" eg. sanic {self.module_input} --simple"
75 )
76
77 module = import_module(self.module_name)
78 app = getattr(module, self.app_name, None)
79 if self.as_factory:
80 try:
81 app = app(self.args)
82 except TypeError:
83 app = app()
84
85 app_type_name = type(app).__name__
86
87 if (
88 not isinstance(app, Sanic)
89 and self.args
90 and hasattr(self.args, "module")
91 ):
92 if callable(app):
93 solution = f"sanic {self.args.module} --factory"
94 raise ValueError(
95 "Module is not a Sanic app, it is a "
96 f"{app_type_name}\n"
97 " If this callable returns a "
98 f"Sanic instance try: \n{solution}"
99 )
100
101 raise ValueError(
102 f"Module is not a Sanic app, it is a {app_type_name}\n"
103 f" Perhaps you meant {self.args.module}:app?"
104 )
105 return app
106
107
108 class CertLoader:
109 _creator_class: Type[CertCreator]
110
111 def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):
112 creator_name = ssl_data.get("creator")
113 if creator_name not in ("mkcert", "trustme"):
114 raise RuntimeError(f"Unknown certificate creator: {creator_name}")
115 elif creator_name == "mkcert":
116 self._creator_class = MkcertCreator
117 elif creator_name == "trustme":
118 self._creator_class = TrustmeCreator
119
120 self._key = ssl_data["key"]
121 self._cert = ssl_data["cert"]
122 self._localhost = cast(str, ssl_data["localhost"])
123
124 def load(self, app: SanicApp):
125 creator = self._creator_class(app, self._key, self._cert)
126 return creator.generate_cert(self._localhost)
127
[end of sanic/worker/loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sanic/worker/loader.py b/sanic/worker/loader.py
--- a/sanic/worker/loader.py
+++ b/sanic/worker/loader.py
@@ -5,18 +5,10 @@
from importlib import import_module
from pathlib import Path
-from typing import (
- TYPE_CHECKING,
- Any,
- Callable,
- Dict,
- Optional,
- Type,
- Union,
- cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union, cast
-from sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator
+from sanic.http.tls.context import process_to_context
+from sanic.http.tls.creators import MkcertCreator, TrustmeCreator
if TYPE_CHECKING:
@@ -106,21 +98,30 @@
class CertLoader:
- _creator_class: Type[CertCreator]
+ _creators = {
+ "mkcert": MkcertCreator,
+ "trustme": TrustmeCreator,
+ }
def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):
- creator_name = ssl_data.get("creator")
- if creator_name not in ("mkcert", "trustme"):
+ self._ssl_data = ssl_data
+
+ creator_name = cast(str, ssl_data.get("creator"))
+
+ self._creator_class = self._creators.get(creator_name)
+ if not creator_name:
+ return
+
+ if not self._creator_class:
raise RuntimeError(f"Unknown certificate creator: {creator_name}")
- elif creator_name == "mkcert":
- self._creator_class = MkcertCreator
- elif creator_name == "trustme":
- self._creator_class = TrustmeCreator
self._key = ssl_data["key"]
self._cert = ssl_data["cert"]
self._localhost = cast(str, ssl_data["localhost"])
def load(self, app: SanicApp):
+ if not self._creator_class:
+ return process_to_context(self._ssl_data)
+
creator = self._creator_class(app, self._key, self._cert)
return creator.generate_cert(self._localhost)
|
{"golden_diff": "diff --git a/sanic/worker/loader.py b/sanic/worker/loader.py\n--- a/sanic/worker/loader.py\n+++ b/sanic/worker/loader.py\n@@ -5,18 +5,10 @@\n \n from importlib import import_module\n from pathlib import Path\n-from typing import (\n- TYPE_CHECKING,\n- Any,\n- Callable,\n- Dict,\n- Optional,\n- Type,\n- Union,\n- cast,\n-)\n+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union, cast\n \n-from sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator\n+from sanic.http.tls.context import process_to_context\n+from sanic.http.tls.creators import MkcertCreator, TrustmeCreator\n \n \n if TYPE_CHECKING:\n@@ -106,21 +98,30 @@\n \n \n class CertLoader:\n- _creator_class: Type[CertCreator]\n+ _creators = {\n+ \"mkcert\": MkcertCreator,\n+ \"trustme\": TrustmeCreator,\n+ }\n \n def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):\n- creator_name = ssl_data.get(\"creator\")\n- if creator_name not in (\"mkcert\", \"trustme\"):\n+ self._ssl_data = ssl_data\n+\n+ creator_name = cast(str, ssl_data.get(\"creator\"))\n+\n+ self._creator_class = self._creators.get(creator_name)\n+ if not creator_name:\n+ return\n+\n+ if not self._creator_class:\n raise RuntimeError(f\"Unknown certificate creator: {creator_name}\")\n- elif creator_name == \"mkcert\":\n- self._creator_class = MkcertCreator\n- elif creator_name == \"trustme\":\n- self._creator_class = TrustmeCreator\n \n self._key = ssl_data[\"key\"]\n self._cert = ssl_data[\"cert\"]\n self._localhost = cast(str, ssl_data[\"localhost\"])\n \n def load(self, app: SanicApp):\n+ if not self._creator_class:\n+ return process_to_context(self._ssl_data)\n+\n creator = self._creator_class(app, self._key, self._cert)\n return creator.generate_cert(self._localhost)\n", "issue": "Certificates not created with `mkcert` or `trustme` raise a RuntimeError\nThe `CertLoader` class in `sanic-org/sanic/sanic/worker/loader.py` checks the creator of the certificate. If the creator is not `mkcert` or `trustme` then it raises a `RuntimeError`. 
This will prevent Sanic from running with certificates from any other sources.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Optional,\n Type,\n Union,\n cast,\n)\n\nfrom sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator\n\n\nif TYPE_CHECKING:\n from sanic import Sanic as SanicApp\n\n\nclass AppLoader:\n def __init__(\n self,\n module_input: str = \"\",\n as_factory: bool = False,\n as_simple: bool = False,\n args: Any = None,\n factory: Optional[Callable[[], SanicApp]] = None,\n ) -> None:\n self.module_input = module_input\n self.module_name = \"\"\n self.app_name = \"\"\n self.as_factory = as_factory\n self.as_simple = as_simple\n self.args = args\n self.factory = factory\n self.cwd = os.getcwd()\n\n if module_input:\n delimiter = \":\" if \":\" in module_input else \".\"\n if module_input.count(delimiter):\n module_name, app_name = module_input.rsplit(delimiter, 1)\n self.module_name = module_name\n self.app_name = app_name\n if self.app_name.endswith(\"()\"):\n self.as_factory = True\n self.app_name = self.app_name[:-2]\n\n def load(self) -> SanicApp:\n module_path = os.path.abspath(self.cwd)\n if module_path not in sys.path:\n sys.path.append(module_path)\n\n if self.factory:\n return self.factory()\n else:\n from sanic.app import Sanic\n from sanic.simple import create_simple_server\n\n if self.as_simple:\n path = Path(self.module_input)\n app = create_simple_server(path)\n else:\n if self.module_name == \"\" and os.path.isdir(self.module_input):\n raise ValueError(\n \"App not found.\\n\"\n \" Please use --simple if you are passing a \"\n \"directory to sanic.\\n\"\n f\" eg. sanic {self.module_input} --simple\"\n )\n\n module = import_module(self.module_name)\n app = getattr(module, self.app_name, None)\n if self.as_factory:\n try:\n app = app(self.args)\n except TypeError:\n app = app()\n\n app_type_name = type(app).__name__\n\n if (\n not isinstance(app, Sanic)\n and self.args\n and hasattr(self.args, \"module\")\n ):\n if callable(app):\n solution = f\"sanic {self.args.module} --factory\"\n raise ValueError(\n \"Module is not a Sanic app, it is a \"\n f\"{app_type_name}\\n\"\n \" If this callable returns a \"\n f\"Sanic instance try: \\n{solution}\"\n )\n\n raise ValueError(\n f\"Module is not a Sanic app, it is a {app_type_name}\\n\"\n f\" Perhaps you meant {self.args.module}:app?\"\n )\n return app\n\n\nclass CertLoader:\n _creator_class: Type[CertCreator]\n\n def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):\n creator_name = ssl_data.get(\"creator\")\n if creator_name not in (\"mkcert\", \"trustme\"):\n raise RuntimeError(f\"Unknown certificate creator: {creator_name}\")\n elif creator_name == \"mkcert\":\n self._creator_class = MkcertCreator\n elif creator_name == \"trustme\":\n self._creator_class = TrustmeCreator\n\n self._key = ssl_data[\"key\"]\n self._cert = ssl_data[\"cert\"]\n self._localhost = cast(str, ssl_data[\"localhost\"])\n\n def load(self, app: SanicApp):\n creator = self._creator_class(app, self._key, self._cert)\n return creator.generate_cert(self._localhost)\n", "path": "sanic/worker/loader.py"}]}
| 1,746 | 499 |
gh_patches_debug_17120
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-89
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flask integration unicode support
I ran into a small issue where the Flask integration is unable to set tags when they contain unicode characters.
The specific case we hit is when unicode characters appear in the URL.
https://github.com/DataDog/dd-trace-py/blob/8a9a7a59312de544d9c774e19872eb5e97cd8307/ddtrace/contrib/flask/middleware.py#L119
```
2016-10-28 13:47:58,953 - ERROR - error finishing trace
Traceback (most recent call last):
File "/home/vagrant/.virtualenvs/kennel/local/lib/python2.7/site-packages/ddtrace/contrib/flask/middleware.py", line 150, in _request_finished
self._finish_span(response=response)
File "/home/vagrant/.virtualenvs/kennel/local/lib/python2.7/site-packages/ddtrace/contrib/flask/middleware.py", line 119, in _finish_span
span.set_tag(http.URL, str(request.base_url or ""))
UnicodeEncodeError: 'ascii' codec can't encode character u'\xe8' in position 31: ordinal not in range(128)
```
Happy to submit a PR for a fix; I'm not sure how you would like to go about it, but my thinking was:
``` python
from ...compat import stringify
# Use `stringify` instead of `str` throughout the module
span.set_tag(http.URL, stringify(request.base_url or ""))
```
</issue>
<code>
[start of ddtrace/contrib/flask/middleware.py]
1 """
2 Datadog trace code for flask.
3
4 Requires a modern version of flask and the `blinker` library (which is a
5 dependency of flask signals).
6 """
7
8 # stdlib
9 import logging
10
11 # project
12 from ...ext import http, errors, AppTypes
13
14 # 3p
15 import flask.templating
16 from flask import g, request, signals
17
18
19 log = logging.getLogger(__name__)
20
21
22 class TraceMiddleware(object):
23
24 def __init__(self, app, tracer, service="flask", use_signals=True):
25 self.app = app
26 self.app.logger.info("initializing trace middleware")
27
28 # save our traces.
29 self._tracer = tracer
30 self._service = service
31
32 self._tracer.set_service_info(
33 service=service,
34 app="flask",
35 app_type=AppTypes.web,
36 )
37
38 # warn the user if signals are unavailable (because blinker isn't
39 # installed) if they are asking to use them.
40 if use_signals and not signals.signals_available:
41 self.app.logger.info(_blinker_not_installed_msg)
42 self.use_signals = use_signals and signals.signals_available
43
44 # instrument request timings
45 timing_signals = {
46 'request_started': self._request_started,
47 'request_finished': self._request_finished,
48 'got_request_exception': self._request_exception,
49 }
50 if self.use_signals and _signals_exist(timing_signals):
51 self._connect(timing_signals)
52 else:
53 # Fallback to request hooks. Won't catch exceptions.
54 # handle exceptions.
55 self.app.before_request(self._before_request)
56 self.app.after_request(self._after_request)
57
58 # Instrument template rendering. If it's flask >= 0.11, we can use
59 # signals, Otherwise we have to patch a global method.
60 template_signals = {
61 'before_render_template': self._template_started, # added in 0.11
62 'template_rendered': self._template_done
63 }
64 if self.use_signals and _signals_exist(template_signals):
65 self._connect(template_signals)
66 else:
67 _patch_render(tracer)
68
69 def _flask_signals_exist(self, names):
70 """ Return true if the current version of flask has all of the given
71 signals.
72 """
73 return all(getattr(signals, n, None) for n in names)
74
75 def _connect(self, signal_to_handler):
76 connected = True
77 for name, handler in signal_to_handler.items():
78 s = getattr(signals, name, None)
79 if not s:
80 connected = False
81 log.warn("trying to instrument missing signal %s", name)
82 continue
83 s.connect(handler, sender=self.app)
84 return connected
85
86 # common methods
87
88 def _start_span(self):
89 try:
90 # if we have a parent span here, it means something was gone wrong.
91 # might as well clear it out.
92 self._tracer.clear_current_span()
93
94 g.flask_datadog_span = self._tracer.trace(
95 "flask.request",
96 service=self._service,
97 span_type=http.TYPE,
98 )
99 except Exception:
100 self.app.logger.exception("error tracing request")
101
102 def _finish_span(self, response=None, exception=None):
103 """ Close and finish the active span if it exists. """
104 span = getattr(g, 'flask_datadog_span', None)
105 if span:
106 if span.sampled:
107 error = 0
108 code = response.status_code if response else None
109
110 # if we didn't get a response, but we did get an exception, set
111 # codes accordingly.
112 if not response and exception:
113 error = 1
114 code = 500
115 span.set_tag(errors.ERROR_TYPE, type(exception))
116 span.set_tag(errors.ERROR_MSG, exception)
117
118 span.resource = str(request.endpoint or "").lower()
119 span.set_tag(http.URL, str(request.base_url or ""))
120 span.set_tag(http.STATUS_CODE, code)
121 span.error = error
122 span.finish()
123 # Clear our span just in case.
124 g.flask_datadog_span = None
125
126 # Request hook methods
127
128 def _before_request(self):
129 """ Starts tracing the current request and stores it in the global
130 request object.
131 """
132 self._start_span()
133
134 def _after_request(self, response):
135 """ handles a successful response. """
136 try:
137 self._finish_span(response=response)
138 except Exception:
139 self.app.logger.exception("error finishing trace")
140 finally:
141 return response
142
143 # signal handling methods
144
145 def _request_started(self, sender):
146 self._start_span()
147
148 def _request_finished(self, sender, response, **kwargs):
149 try:
150 self._finish_span(response=response)
151 except Exception:
152 self.app.logger.exception("error finishing trace")
153 return response
154
155 def _request_exception(self, *args, **kwargs):
156 """ handles an error response. """
157 exception = kwargs.pop("exception", None)
158 try:
159 self._finish_span(exception=exception)
160 except Exception:
161 self.app.logger.exception("error tracing error")
162
163 def _template_started(self, sender, template, *args, **kwargs):
164 span = self._tracer.trace('flask.template')
165 try:
166 span.span_type = http.TEMPLATE
167 span.set_tag("flask.template", template.name or "string")
168 finally:
169 g.flask_datadog_tmpl_span = span
170
171 def _template_done(self, *arg, **kwargs):
172 span = getattr(g, 'flask_datadog_tmpl_span', None)
173 if span:
174 span.finish()
175
176
177 def _patch_render(tracer):
178 """ patch flask's render template methods with the given tracer. """
179 # fall back to patching global method
180 _render = flask.templating._render
181
182 def _traced_render(template, context, app):
183 with tracer.trace('flask.template') as span:
184 span.span_type = http.TEMPLATE
185 span.set_tag("flask.template", template.name or "string")
186 return _render(template, context, app)
187
188 flask.templating._render = _traced_render
189
190
191 def _signals_exist(names):
192 """ Return true if all of the given signals exist in this version of flask.
193 """
194 return all(getattr(signals, n, False) for n in names)
195
196 _blinker_not_installed_msg = (
197 "please install blinker to use flask signals. "
198 "http://flask.pocoo.org/docs/0.11/signals/"
199 )
200
[end of ddtrace/contrib/flask/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/flask/middleware.py b/ddtrace/contrib/flask/middleware.py
--- a/ddtrace/contrib/flask/middleware.py
+++ b/ddtrace/contrib/flask/middleware.py
@@ -9,6 +9,7 @@
import logging
# project
+from ... import compat
from ...ext import http, errors, AppTypes
# 3p
@@ -115,8 +116,8 @@
span.set_tag(errors.ERROR_TYPE, type(exception))
span.set_tag(errors.ERROR_MSG, exception)
- span.resource = str(request.endpoint or "").lower()
- span.set_tag(http.URL, str(request.base_url or ""))
+ span.resource = compat.to_unicode(request.endpoint or '').lower()
+ span.set_tag(http.URL, compat.to_unicode(request.base_url or ''))
span.set_tag(http.STATUS_CODE, code)
span.error = error
span.finish()
|
{"golden_diff": "diff --git a/ddtrace/contrib/flask/middleware.py b/ddtrace/contrib/flask/middleware.py\n--- a/ddtrace/contrib/flask/middleware.py\n+++ b/ddtrace/contrib/flask/middleware.py\n@@ -9,6 +9,7 @@\n import logging\n \n # project\n+from ... import compat\n from ...ext import http, errors, AppTypes\n \n # 3p\n@@ -115,8 +116,8 @@\n span.set_tag(errors.ERROR_TYPE, type(exception))\n span.set_tag(errors.ERROR_MSG, exception)\n \n- span.resource = str(request.endpoint or \"\").lower()\n- span.set_tag(http.URL, str(request.base_url or \"\"))\n+ span.resource = compat.to_unicode(request.endpoint or '').lower()\n+ span.set_tag(http.URL, compat.to_unicode(request.base_url or ''))\n span.set_tag(http.STATUS_CODE, code)\n span.error = error\n span.finish()\n", "issue": "Flask integration unicode support\nI ran into a small issue where the Flask integration is unable to set tags when they contain unicode characters.\n\nThe specific case we have is when unicode was in the url.\n\nhttps://github.com/DataDog/dd-trace-py/blob/8a9a7a59312de544d9c774e19872eb5e97cd8307/ddtrace/contrib/flask/middleware.py#L119\n\n```\n2016-10-28 13:47:58,953 - ERROR - error finishing trace\nTraceback (most recent call last):\n File \"/home/vagrant/.virtualenvs/kennel/local/lib/python2.7/site-packages/ddtrace/contrib/flask/middleware.py\", line 150, in _request_finished\n self._finish_span(response=response)\n File \"/home/vagrant/.virtualenvs/kennel/local/lib/python2.7/site-packages/ddtrace/contrib/flask/middleware.py\", line 119, in _finish_span\n span.set_tag(http.URL, str(request.base_url or \"\"))\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\xe8' in position 31: ordinal not in range(128)\n```\n\nHappy to submit a PR for a fix, not sure how you would like to go about it, my thinking was:\n\n``` python\nfrom ...compat import stringify\n\n# Use `stringify` instead of `str` throughout the module\nspan.set_tag(http.URL, stringify(request.base_url or \"\"))\n```\n\n", "before_files": [{"content": "\"\"\"\nDatadog trace code for flask.\n\nRequires a modern version of flask and the `blinker` library (which is a\ndependency of flask signals).\n\"\"\"\n\n# stdlib\nimport logging\n\n# project\nfrom ...ext import http, errors, AppTypes\n\n# 3p\nimport flask.templating\nfrom flask import g, request, signals\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TraceMiddleware(object):\n\n def __init__(self, app, tracer, service=\"flask\", use_signals=True):\n self.app = app\n self.app.logger.info(\"initializing trace middleware\")\n\n # save our traces.\n self._tracer = tracer\n self._service = service\n\n self._tracer.set_service_info(\n service=service,\n app=\"flask\",\n app_type=AppTypes.web,\n )\n\n # warn the user if signals are unavailable (because blinker isn't\n # installed) if they are asking to use them.\n if use_signals and not signals.signals_available:\n self.app.logger.info(_blinker_not_installed_msg)\n self.use_signals = use_signals and signals.signals_available\n\n # instrument request timings\n timing_signals = {\n 'request_started': self._request_started,\n 'request_finished': self._request_finished,\n 'got_request_exception': self._request_exception,\n }\n if self.use_signals and _signals_exist(timing_signals):\n self._connect(timing_signals)\n else:\n # Fallback to request hooks. Won't catch exceptions.\n # handle exceptions.\n self.app.before_request(self._before_request)\n self.app.after_request(self._after_request)\n\n # Instrument template rendering. 
If it's flask >= 0.11, we can use\n # signals, Otherwise we have to patch a global method.\n template_signals = {\n 'before_render_template': self._template_started, # added in 0.11\n 'template_rendered': self._template_done\n }\n if self.use_signals and _signals_exist(template_signals):\n self._connect(template_signals)\n else:\n _patch_render(tracer)\n\n def _flask_signals_exist(self, names):\n \"\"\" Return true if the current version of flask has all of the given\n signals.\n \"\"\"\n return all(getattr(signals, n, None) for n in names)\n\n def _connect(self, signal_to_handler):\n connected = True\n for name, handler in signal_to_handler.items():\n s = getattr(signals, name, None)\n if not s:\n connected = False\n log.warn(\"trying to instrument missing signal %s\", name)\n continue\n s.connect(handler, sender=self.app)\n return connected\n\n # common methods\n\n def _start_span(self):\n try:\n # if we have a parent span here, it means something was gone wrong.\n # might as well clear it out.\n self._tracer.clear_current_span()\n\n g.flask_datadog_span = self._tracer.trace(\n \"flask.request\",\n service=self._service,\n span_type=http.TYPE,\n )\n except Exception:\n self.app.logger.exception(\"error tracing request\")\n\n def _finish_span(self, response=None, exception=None):\n \"\"\" Close and finish the active span if it exists. \"\"\"\n span = getattr(g, 'flask_datadog_span', None)\n if span:\n if span.sampled:\n error = 0\n code = response.status_code if response else None\n\n # if we didn't get a response, but we did get an exception, set\n # codes accordingly.\n if not response and exception:\n error = 1\n code = 500\n span.set_tag(errors.ERROR_TYPE, type(exception))\n span.set_tag(errors.ERROR_MSG, exception)\n\n span.resource = str(request.endpoint or \"\").lower()\n span.set_tag(http.URL, str(request.base_url or \"\"))\n span.set_tag(http.STATUS_CODE, code)\n span.error = error\n span.finish()\n # Clear our span just in case.\n g.flask_datadog_span = None\n\n # Request hook methods\n\n def _before_request(self):\n \"\"\" Starts tracing the current request and stores it in the global\n request object.\n \"\"\"\n self._start_span()\n\n def _after_request(self, response):\n \"\"\" handles a successful response. \"\"\"\n try:\n self._finish_span(response=response)\n except Exception:\n self.app.logger.exception(\"error finishing trace\")\n finally:\n return response\n\n # signal handling methods\n\n def _request_started(self, sender):\n self._start_span()\n\n def _request_finished(self, sender, response, **kwargs):\n try:\n self._finish_span(response=response)\n except Exception:\n self.app.logger.exception(\"error finishing trace\")\n return response\n\n def _request_exception(self, *args, **kwargs):\n \"\"\" handles an error response. \"\"\"\n exception = kwargs.pop(\"exception\", None)\n try:\n self._finish_span(exception=exception)\n except Exception:\n self.app.logger.exception(\"error tracing error\")\n\n def _template_started(self, sender, template, *args, **kwargs):\n span = self._tracer.trace('flask.template')\n try:\n span.span_type = http.TEMPLATE\n span.set_tag(\"flask.template\", template.name or \"string\")\n finally:\n g.flask_datadog_tmpl_span = span\n\n def _template_done(self, *arg, **kwargs):\n span = getattr(g, 'flask_datadog_tmpl_span', None)\n if span:\n span.finish()\n\n\ndef _patch_render(tracer):\n \"\"\" patch flask's render template methods with the given tracer. 
\"\"\"\n # fall back to patching global method\n _render = flask.templating._render\n\n def _traced_render(template, context, app):\n with tracer.trace('flask.template') as span:\n span.span_type = http.TEMPLATE\n span.set_tag(\"flask.template\", template.name or \"string\")\n return _render(template, context, app)\n\n flask.templating._render = _traced_render\n\n\ndef _signals_exist(names):\n \"\"\" Return true if all of the given signals exist in this version of flask.\n \"\"\"\n return all(getattr(signals, n, False) for n in names)\n\n_blinker_not_installed_msg = (\n \"please install blinker to use flask signals. \"\n \"http://flask.pocoo.org/docs/0.11/signals/\"\n)\n", "path": "ddtrace/contrib/flask/middleware.py"}]}
| 2,799 | 199 |
gh_patches_debug_14855
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-9532
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[regression] String formatting has regressed since 0.60. ERROR: Format placeholder @9@ out of range.
**Describe the bug**
There is a regression of as yet unknown cause in Meson's string formatting, as discovered by @alex3kov and diagnosed by me at libsixel/libsixel#50. This regression causes a previously building package to no longer build.
**To Reproduce**
https://github.com/libsixel/libsixel/blob/v1.10.3/meson.build
**Expected behavior**
Good build
**system parameters**
* Is this a [cross build](https://mesonbuild.com/Cross-compilation.html) or just a plain native build (for the same computer)?
Plain build.
* what operating system (e.g. MacOS Catalina, Windows 10, CentOS 8.0, Ubuntu 18.04, etc.)
Arch Linux
* what Python version are you using e.g. 3.8.0
Python 3.9.7
* what `meson --version`
 I can confirm that versions above 0.59.2 are affected.
* what `ninja --version` if it's a Ninja build
 We do not get to the Ninja step; Meson is the cause.
</issue>
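As background for the issue above: Meson's string `format()` substitutes `@N@` placeholders (indexed from 0) with its positional arguments and raises the quoted error when a placeholder index has no matching argument. The sketch below mirrors that substitution logic from the module listed further down in plain Python; it is an illustration of the behaviour only, not code taken from Meson or from libsixel's build files.

```python
import re

def meson_style_format(template: str, args: list) -> str:
    # Mirrors StringHolder.format_method below: booleans are lower-cased,
    # everything else is stringified, then @N@ placeholders are substituted.
    arg_strings = [str(a).lower() if isinstance(a, bool) else str(a) for a in args]

    def arg_replace(match: "re.Match") -> str:
        idx = int(match.group(1))
        if idx >= len(arg_strings):
            raise ValueError(f"Format placeholder @{idx}@ out of range.")
        return arg_strings[idx]

    return re.sub(r"@(\d+)@", arg_replace, template)

print(meson_style_format("@0@ v@1@", ["libsixel", "1.10.3"]))  # -> "libsixel v1.10.3"
# meson_style_format("@9@", ["only one arg"]) raises the same kind of
# "Format placeholder @9@ out of range." error that the regression surfaces
# on a meson.build that previously worked.
```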
<code>
[start of mesonbuild/interpreter/primitives/string.py]
1 # Copyright 2021 The Meson development team
2 # SPDX-license-identifier: Apache-2.0
3
4 import re
5 import os
6 from pathlib import PurePath
7
8 import typing as T
9
10 from ...mesonlib import version_compare
11 from ...interpreterbase import (
12 ObjectHolder,
13 MesonOperator,
14 FeatureNew,
15 typed_operator,
16 noKwargs,
17 noPosargs,
18 typed_pos_args,
19
20 TYPE_var,
21 TYPE_kwargs,
22
23 InvalidArguments,
24 )
25
26
27 if T.TYPE_CHECKING:
28 # Object holders need the actual interpreter
29 from ...interpreter import Interpreter
30
31 class StringHolder(ObjectHolder[str]):
32 def __init__(self, obj: str, interpreter: 'Interpreter') -> None:
33 super().__init__(obj, interpreter)
34 self.methods.update({
35 'contains': self.contains_method,
36 'startswith': self.startswith_method,
37 'endswith': self.endswith_method,
38 'format': self.format_method,
39 'join': self.join_method,
40 'replace': self.replace_method,
41 'split': self.split_method,
42 'strip': self.strip_method,
43 'substring': self.substring_method,
44 'to_int': self.to_int_method,
45 'to_lower': self.to_lower_method,
46 'to_upper': self.to_upper_method,
47 'underscorify': self.underscorify_method,
48 'version_compare': self.version_compare_method,
49 })
50
51 self.trivial_operators.update({
52 # Arithmetic
53 MesonOperator.PLUS: (str, lambda x: self.held_object + x),
54
55 # Comparison
56 MesonOperator.EQUALS: (str, lambda x: self.held_object == x),
57 MesonOperator.NOT_EQUALS: (str, lambda x: self.held_object != x),
58 MesonOperator.GREATER: (str, lambda x: self.held_object > x),
59 MesonOperator.LESS: (str, lambda x: self.held_object < x),
60 MesonOperator.GREATER_EQUALS: (str, lambda x: self.held_object >= x),
61 MesonOperator.LESS_EQUALS: (str, lambda x: self.held_object <= x),
62 })
63
64 # Use actual methods for functions that require additional checks
65 self.operators.update({
66 MesonOperator.DIV: self.op_div,
67 MesonOperator.INDEX: self.op_index,
68 })
69
70 def display_name(self) -> str:
71 return 'str'
72
73 @noKwargs
74 @typed_pos_args('str.contains', str)
75 def contains_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
76 return self.held_object.find(args[0]) >= 0
77
78 @noKwargs
79 @typed_pos_args('str.startswith', str)
80 def startswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
81 return self.held_object.startswith(args[0])
82
83 @noKwargs
84 @typed_pos_args('str.endswith', str)
85 def endswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
86 return self.held_object.endswith(args[0])
87
88 @noKwargs
89 @typed_pos_args('str.format', varargs=object)
90 def format_method(self, args: T.Tuple[T.List[object]], kwargs: TYPE_kwargs) -> str:
91 arg_strings: T.List[str] = []
92 for arg in args[0]:
93 if isinstance(arg, bool): # Python boolean is upper case.
94 arg = str(arg).lower()
95 arg_strings.append(str(arg))
96
97 def arg_replace(match: T.Match[str]) -> str:
98 idx = int(match.group(1))
99 if idx >= len(arg_strings):
100 raise InvalidArguments(f'Format placeholder @{idx}@ out of range.')
101 return arg_strings[idx]
102
103 return re.sub(r'@(\d+)@', arg_replace, self.held_object)
104
105 @noKwargs
106 @typed_pos_args('str.join', varargs=str)
107 def join_method(self, args: T.Tuple[T.List[str]], kwargs: TYPE_kwargs) -> str:
108 return self.held_object.join(args[0])
109
110 @noKwargs
111 @typed_pos_args('str.replace', str, str)
112 def replace_method(self, args: T.Tuple[str, str], kwargs: TYPE_kwargs) -> str:
113 return self.held_object.replace(args[0], args[1])
114
115 @noKwargs
116 @typed_pos_args('str.split', optargs=[str])
117 def split_method(self, args: T.Tuple[T.Optional[str]], kwargs: TYPE_kwargs) -> T.List[str]:
118 return self.held_object.split(args[0])
119
120 @noKwargs
121 @typed_pos_args('str.strip', optargs=[str])
122 def strip_method(self, args: T.Tuple[T.Optional[str]], kwargs: TYPE_kwargs) -> str:
123 return self.held_object.strip(args[0])
124
125 @noKwargs
126 @typed_pos_args('str.substring', optargs=[int, int])
127 def substring_method(self, args: T.Tuple[T.Optional[int], T.Optional[int]], kwargs: TYPE_kwargs) -> str:
128 start = args[0] if args[0] is not None else 0
129 end = args[1] if args[1] is not None else len(self.held_object)
130 return self.held_object[start:end]
131
132 @noKwargs
133 @noPosargs
134 def to_int_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> int:
135 try:
136 return int(self.held_object)
137 except ValueError:
138 raise InvalidArguments(f'String {self.held_object!r} cannot be converted to int')
139
140 @noKwargs
141 @noPosargs
142 def to_lower_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:
143 return self.held_object.lower()
144
145 @noKwargs
146 @noPosargs
147 def to_upper_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:
148 return self.held_object.upper()
149
150 @noKwargs
151 @noPosargs
152 def underscorify_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:
153 return re.sub(r'[^a-zA-Z0-9]', '_', self.held_object)
154
155 @noKwargs
156 @typed_pos_args('str.version_compare', str)
157 def version_compare_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
158 return version_compare(self.held_object, args[0])
159
160 @FeatureNew('/ with string arguments', '0.49.0')
161 @typed_operator(MesonOperator.DIV, str)
162 def op_div(self, other: str) -> str:
163 return os.path.join(self.held_object, other).replace('\\', '/')
164
165 @typed_operator(MesonOperator.INDEX, int)
166 def op_index(self, other: int) -> str:
167 try:
168 return self.held_object[other]
169 except IndexError:
170 raise InvalidArguments(f'Index {other} out of bounds of string of size {len(self.held_object)}.')
171
172
173 class MesonVersionString(str):
174 pass
175
176 class MesonVersionStringHolder(StringHolder):
177 @noKwargs
178 @typed_pos_args('str.version_compare', str)
179 def version_compare_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
180 self.interpreter.tmp_meson_version = args[0]
181 return version_compare(self.held_object, args[0])
182
[end of mesonbuild/interpreter/primitives/string.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mesonbuild/interpreter/primitives/string.py b/mesonbuild/interpreter/primitives/string.py
--- a/mesonbuild/interpreter/primitives/string.py
+++ b/mesonbuild/interpreter/primitives/string.py
@@ -13,6 +13,7 @@
MesonOperator,
FeatureNew,
typed_operator,
+ noArgsFlattening,
noKwargs,
noPosargs,
typed_pos_args,
@@ -85,6 +86,7 @@
def endswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:
return self.held_object.endswith(args[0])
+ @noArgsFlattening
@noKwargs
@typed_pos_args('str.format', varargs=object)
def format_method(self, args: T.Tuple[T.List[object]], kwargs: TYPE_kwargs) -> str:
|
{"golden_diff": "diff --git a/mesonbuild/interpreter/primitives/string.py b/mesonbuild/interpreter/primitives/string.py\n--- a/mesonbuild/interpreter/primitives/string.py\n+++ b/mesonbuild/interpreter/primitives/string.py\n@@ -13,6 +13,7 @@\n MesonOperator,\n FeatureNew,\n typed_operator,\n+ noArgsFlattening,\n noKwargs,\n noPosargs,\n typed_pos_args,\n@@ -85,6 +86,7 @@\n def endswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:\n return self.held_object.endswith(args[0])\n \n+ @noArgsFlattening\n @noKwargs\n @typed_pos_args('str.format', varargs=object)\n def format_method(self, args: T.Tuple[T.List[object]], kwargs: TYPE_kwargs) -> str:\n", "issue": "[regression] String formatting has regressed since 0.60. ERROR: Format placeholder @9@ out of range.\n**Describe the bug**\r\nThere is a regression of as yet unknown cause in Meson's string formatting, as discovered by @alex3kov and diagnosed by myself at libsixel/libsixel#50. This regression causes previously building package to no longer build.\r\n\r\n**To Reproduce**\r\nhttps://github.com/libsixel/libsixel/blob/v1.10.3/meson.build\r\n\r\n**Expected behavior**\r\nGood build\r\n\r\n**system parameters**\r\n* Is this a [cross build](https://mesonbuild.com/Cross-compilation.html) or just a plain native build (for the same computer)?\r\n Plain build.\r\n* what operating system (e.g. MacOS Catalina, Windows 10, CentOS 8.0, Ubuntu 18.04, etc.)\r\n Arch Linux\r\n* what Python version are you using e.g. 3.8.0\r\n Python 3.9.7\r\n* what `meson --version`\r\n Versions above 0.59.2 I confirm are affected.\r\n* what `ninja --version` if it's a Ninja build\r\n We do not get to Ninja step, Meson is the cause.\n", "before_files": [{"content": "# Copyright 2021 The Meson development team\n# SPDX-license-identifier: Apache-2.0\n\nimport re\nimport os\nfrom pathlib import PurePath\n\nimport typing as T\n\nfrom ...mesonlib import version_compare\nfrom ...interpreterbase import (\n ObjectHolder,\n MesonOperator,\n FeatureNew,\n typed_operator,\n noKwargs,\n noPosargs,\n typed_pos_args,\n\n TYPE_var,\n TYPE_kwargs,\n\n InvalidArguments,\n)\n\n\nif T.TYPE_CHECKING:\n # Object holders need the actual interpreter\n from ...interpreter import Interpreter\n\nclass StringHolder(ObjectHolder[str]):\n def __init__(self, obj: str, interpreter: 'Interpreter') -> None:\n super().__init__(obj, interpreter)\n self.methods.update({\n 'contains': self.contains_method,\n 'startswith': self.startswith_method,\n 'endswith': self.endswith_method,\n 'format': self.format_method,\n 'join': self.join_method,\n 'replace': self.replace_method,\n 'split': self.split_method,\n 'strip': self.strip_method,\n 'substring': self.substring_method,\n 'to_int': self.to_int_method,\n 'to_lower': self.to_lower_method,\n 'to_upper': self.to_upper_method,\n 'underscorify': self.underscorify_method,\n 'version_compare': self.version_compare_method,\n })\n\n self.trivial_operators.update({\n # Arithmetic\n MesonOperator.PLUS: (str, lambda x: self.held_object + x),\n\n # Comparison\n MesonOperator.EQUALS: (str, lambda x: self.held_object == x),\n MesonOperator.NOT_EQUALS: (str, lambda x: self.held_object != x),\n MesonOperator.GREATER: (str, lambda x: self.held_object > x),\n MesonOperator.LESS: (str, lambda x: self.held_object < x),\n MesonOperator.GREATER_EQUALS: (str, lambda x: self.held_object >= x),\n MesonOperator.LESS_EQUALS: (str, lambda x: self.held_object <= x),\n })\n\n # Use actual methods for functions that require additional checks\n self.operators.update({\n 
MesonOperator.DIV: self.op_div,\n MesonOperator.INDEX: self.op_index,\n })\n\n def display_name(self) -> str:\n return 'str'\n\n @noKwargs\n @typed_pos_args('str.contains', str)\n def contains_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:\n return self.held_object.find(args[0]) >= 0\n\n @noKwargs\n @typed_pos_args('str.startswith', str)\n def startswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:\n return self.held_object.startswith(args[0])\n\n @noKwargs\n @typed_pos_args('str.endswith', str)\n def endswith_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:\n return self.held_object.endswith(args[0])\n\n @noKwargs\n @typed_pos_args('str.format', varargs=object)\n def format_method(self, args: T.Tuple[T.List[object]], kwargs: TYPE_kwargs) -> str:\n arg_strings: T.List[str] = []\n for arg in args[0]:\n if isinstance(arg, bool): # Python boolean is upper case.\n arg = str(arg).lower()\n arg_strings.append(str(arg))\n\n def arg_replace(match: T.Match[str]) -> str:\n idx = int(match.group(1))\n if idx >= len(arg_strings):\n raise InvalidArguments(f'Format placeholder @{idx}@ out of range.')\n return arg_strings[idx]\n\n return re.sub(r'@(\\d+)@', arg_replace, self.held_object)\n\n @noKwargs\n @typed_pos_args('str.join', varargs=str)\n def join_method(self, args: T.Tuple[T.List[str]], kwargs: TYPE_kwargs) -> str:\n return self.held_object.join(args[0])\n\n @noKwargs\n @typed_pos_args('str.replace', str, str)\n def replace_method(self, args: T.Tuple[str, str], kwargs: TYPE_kwargs) -> str:\n return self.held_object.replace(args[0], args[1])\n\n @noKwargs\n @typed_pos_args('str.split', optargs=[str])\n def split_method(self, args: T.Tuple[T.Optional[str]], kwargs: TYPE_kwargs) -> T.List[str]:\n return self.held_object.split(args[0])\n\n @noKwargs\n @typed_pos_args('str.strip', optargs=[str])\n def strip_method(self, args: T.Tuple[T.Optional[str]], kwargs: TYPE_kwargs) -> str:\n return self.held_object.strip(args[0])\n\n @noKwargs\n @typed_pos_args('str.substring', optargs=[int, int])\n def substring_method(self, args: T.Tuple[T.Optional[int], T.Optional[int]], kwargs: TYPE_kwargs) -> str:\n start = args[0] if args[0] is not None else 0\n end = args[1] if args[1] is not None else len(self.held_object)\n return self.held_object[start:end]\n\n @noKwargs\n @noPosargs\n def to_int_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> int:\n try:\n return int(self.held_object)\n except ValueError:\n raise InvalidArguments(f'String {self.held_object!r} cannot be converted to int')\n\n @noKwargs\n @noPosargs\n def to_lower_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:\n return self.held_object.lower()\n\n @noKwargs\n @noPosargs\n def to_upper_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:\n return self.held_object.upper()\n\n @noKwargs\n @noPosargs\n def underscorify_method(self, args: T.List[TYPE_var], kwargs: TYPE_kwargs) -> str:\n return re.sub(r'[^a-zA-Z0-9]', '_', self.held_object)\n\n @noKwargs\n @typed_pos_args('str.version_compare', str)\n def version_compare_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:\n return version_compare(self.held_object, args[0])\n\n @FeatureNew('/ with string arguments', '0.49.0')\n @typed_operator(MesonOperator.DIV, str)\n def op_div(self, other: str) -> str:\n return os.path.join(self.held_object, other).replace('\\\\', '/')\n\n @typed_operator(MesonOperator.INDEX, int)\n def op_index(self, other: int) -> str:\n try:\n return self.held_object[other]\n except 
IndexError:\n raise InvalidArguments(f'Index {other} out of bounds of string of size {len(self.held_object)}.')\n\n\nclass MesonVersionString(str):\n pass\n\nclass MesonVersionStringHolder(StringHolder):\n @noKwargs\n @typed_pos_args('str.version_compare', str)\n def version_compare_method(self, args: T.Tuple[str], kwargs: TYPE_kwargs) -> bool:\n self.interpreter.tmp_meson_version = args[0]\n return version_compare(self.held_object, args[0])\n", "path": "mesonbuild/interpreter/primitives/string.py"}]}
| 2,904 | 195 |
gh_patches_debug_26613
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1852
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
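To make the request concrete, the kind of package-data lookup the reporter describes can be written against `importlib_resources` roughly as follows. This is only an illustrative sketch: Starlite's actual `FileSystemProtocol` interface is not reproduced in this record, and the class and method names below are made up for the example.

```python
import importlib_resources

class PackageDataFiles:
    """Serve static files from package data rather than a real directory (sketch)."""

    def __init__(self, package: str) -> None:
        # files() returns a Traversable that also works for zipped packages,
        # i.e. paths a DirectoryPath validator would reject as non-existent.
        self._root = importlib_resources.files(package)

    def read(self, relative_path: str) -> bytes:
        return self._root.joinpath(relative_path).read_bytes()

# Usage, assuming a hypothetical package "myapp.static" shipping an index.html:
# PackageDataFiles("myapp.static").read("index.html")
```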
<code>
[start of litestar/contrib/sqlalchemy/dto.py]
1 from __future__ import annotations
2
3 from functools import singledispatchmethod
4 from typing import TYPE_CHECKING, Generic, TypeVar
5
6 from sqlalchemy import Column, inspect, orm, sql
7 from sqlalchemy.ext.associationproxy import AssociationProxy, AssociationProxyExtensionType
8 from sqlalchemy.ext.hybrid import HybridExtensionType, hybrid_property
9 from sqlalchemy.orm import (
10 ColumnProperty,
11 DeclarativeBase,
12 InspectionAttr,
13 Mapped,
14 NotExtension,
15 QueryableAttribute,
16 RelationshipProperty,
17 )
18
19 from litestar.dto.factory.abc import AbstractDTOFactory
20 from litestar.dto.factory.data_structures import FieldDefinition
21 from litestar.dto.factory.field import DTO_FIELD_META_KEY, DTOField, Mark
22 from litestar.dto.factory.utils import get_model_type_hints
23 from litestar.types.empty import Empty
24 from litestar.utils.helpers import get_fully_qualified_class_name
25 from litestar.utils.signature import ParsedSignature
26
27 if TYPE_CHECKING:
28 from typing import Any, ClassVar, Collection, Generator
29
30 from typing_extensions import TypeAlias
31
32 from litestar.typing import ParsedType
33
34 __all__ = ("SQLAlchemyDTO",)
35
36 T = TypeVar("T", bound="DeclarativeBase | Collection[DeclarativeBase]")
37 ElementType: TypeAlias = "Column[Any] | RelationshipProperty[Any]"
38
39 SQLA_NS = {**vars(orm), **vars(sql)}
40
41
42 class SQLAlchemyDTO(AbstractDTOFactory[T], Generic[T]):
43 """Support for domain modelling with SQLAlchemy."""
44
45 __slots__ = ()
46
47 model_type: ClassVar[type[DeclarativeBase]]
48
49 @singledispatchmethod
50 @classmethod
51 def handle_orm_descriptor(
52 cls,
53 extension_type: NotExtension | AssociationProxyExtensionType | HybridExtensionType,
54 orm_descriptor: InspectionAttr,
55 key: str,
56 model_type_hints: dict[str, ParsedType],
57 model_name: str,
58 ) -> list[FieldDefinition]:
59 raise NotImplementedError(f"Unsupported extension type: {extension_type}")
60
61 @handle_orm_descriptor.register(NotExtension)
62 @classmethod
63 def _(
64 cls,
65 extension_type: NotExtension,
66 key: str,
67 orm_descriptor: InspectionAttr,
68 model_type_hints: dict[str, ParsedType],
69 model_name: str,
70 ) -> list[FieldDefinition]:
71 if not isinstance(orm_descriptor, QueryableAttribute):
72 raise NotImplementedError(f"Unexpected descriptor type for '{extension_type}': '{orm_descriptor}'")
73
74 elem: ElementType
75 if isinstance(orm_descriptor.property, ColumnProperty):
76 if not isinstance(orm_descriptor.property.expression, Column):
77 raise NotImplementedError(f"Expected 'Column', got: '{orm_descriptor.property.expression}'")
78 elem = orm_descriptor.property.expression
79 elif isinstance(orm_descriptor.property, RelationshipProperty):
80 elem = orm_descriptor.property
81 else:
82 raise NotImplementedError(f"Unhandled property type: '{orm_descriptor.property}'")
83
84 default, default_factory = _detect_defaults(elem)
85
86 if (parsed_type := model_type_hints[key]).origin is Mapped:
87 (parsed_type,) = parsed_type.inner_types
88 else:
89 raise NotImplementedError(f"Expected 'Mapped' origin, got: '{parsed_type.origin}'")
90
91 return [
92 FieldDefinition(
93 name=key,
94 default=default,
95 parsed_type=parsed_type,
96 default_factory=default_factory,
97 dto_field=elem.info.get(DTO_FIELD_META_KEY, DTOField()),
98 unique_model_name=model_name,
99 dto_for=None,
100 )
101 ]
102
103 @handle_orm_descriptor.register(AssociationProxyExtensionType)
104 @classmethod
105 def _(
106 cls,
107 extension_type: AssociationProxyExtensionType,
108 key: str,
109 orm_descriptor: InspectionAttr,
110 model_type_hints: dict[str, ParsedType],
111 model_name: str,
112 ) -> list[FieldDefinition]:
113 if not isinstance(orm_descriptor, AssociationProxy):
114 raise NotImplementedError(f"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'")
115
116 if (parsed_type := model_type_hints[key]).origin is AssociationProxy:
117 (parsed_type,) = parsed_type.inner_types
118 else:
119 raise NotImplementedError(f"Expected 'AssociationProxy' origin, got: '{parsed_type.origin}'")
120
121 return [
122 FieldDefinition(
123 name=key,
124 default=Empty,
125 parsed_type=parsed_type,
126 default_factory=None,
127 dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)),
128 unique_model_name=model_name,
129 dto_for=None,
130 )
131 ]
132
133 @handle_orm_descriptor.register(HybridExtensionType)
134 @classmethod
135 def _(
136 cls,
137 extension_type: HybridExtensionType,
138 key: str,
139 orm_descriptor: InspectionAttr,
140 model_type_hints: dict[str, ParsedType],
141 model_name: str,
142 ) -> list[FieldDefinition]:
143 if not isinstance(orm_descriptor, hybrid_property):
144 raise NotImplementedError(f"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'")
145
146 getter_sig = ParsedSignature.from_fn(orm_descriptor.fget, {})
147
148 field_defs = [
149 FieldDefinition(
150 name=orm_descriptor.__name__,
151 default=Empty,
152 parsed_type=getter_sig.return_type,
153 default_factory=None,
154 dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)),
155 unique_model_name=model_name,
156 dto_for="return",
157 )
158 ]
159
160 if orm_descriptor.fset is not None:
161 setter_sig = ParsedSignature.from_fn(orm_descriptor.fset, {})
162 field_defs.append(
163 FieldDefinition(
164 name=orm_descriptor.__name__,
165 default=Empty,
166 parsed_type=next(iter(setter_sig.parameters.values())).parsed_type,
167 default_factory=None,
168 dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.WRITE_ONLY)),
169 unique_model_name=model_name,
170 dto_for="data",
171 )
172 )
173
174 return field_defs
175
176 @classmethod
177 def generate_field_definitions(cls, model_type: type[DeclarativeBase]) -> Generator[FieldDefinition, None, None]:
178 if (mapper := inspect(model_type)) is None: # pragma: no cover
179 raise RuntimeError("Unexpected `None` value for mapper.")
180
181 # includes SQLAlchemy names and other mapped class names in the forward reference resolution namespace
182 namespace = {**SQLA_NS, **{m.class_.__name__: m.class_ for m in mapper.registry.mappers if m is not mapper}}
183 model_type_hints = get_model_type_hints(model_type, namespace=namespace)
184 model_name = get_fully_qualified_class_name(model_type)
185
186 # the same hybrid property descriptor can be included in `all_orm_descriptors` multiple times, once
187 # for each method name it is bound to. We only need to see it once, so track views of it here.
188 seen_hybrid_descriptors: set[hybrid_property] = set()
189 for key, orm_descriptor in mapper.all_orm_descriptors.items():
190 if isinstance(orm_descriptor, hybrid_property):
191 if orm_descriptor in seen_hybrid_descriptors:
192 continue
193 seen_hybrid_descriptors.add(orm_descriptor)
194
195 yield from cls.handle_orm_descriptor(
196 orm_descriptor.extension_type, key, orm_descriptor, model_type_hints, model_name
197 )
198
199 @classmethod
200 def detect_nested_field(cls, parsed_type: ParsedType) -> bool:
201 return parsed_type.is_subclass_of(DeclarativeBase)
202
203
204 def _detect_defaults(elem: ElementType) -> tuple[Any, Any]:
205 default: Any = Empty
206 default_factory: Any = None # pyright:ignore
207 if sqla_default := getattr(elem, "default", None):
208 if sqla_default.is_scalar:
209 default = sqla_default.arg
210 elif sqla_default.is_callable:
211
212 def default_factory(d: Any = sqla_default) -> Any:
213 return d.arg({})
214
215 else:
216 raise ValueError("Unexpected default type")
217 else:
218 if getattr(elem, "nullable", False):
219 default = None
220 return default, default_factory
221
[end of litestar/contrib/sqlalchemy/dto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/litestar/contrib/sqlalchemy/dto.py b/litestar/contrib/sqlalchemy/dto.py
--- a/litestar/contrib/sqlalchemy/dto.py
+++ b/litestar/contrib/sqlalchemy/dto.py
@@ -20,6 +20,7 @@
from litestar.dto.factory.data_structures import FieldDefinition
from litestar.dto.factory.field import DTO_FIELD_META_KEY, DTOField, Mark
from litestar.dto.factory.utils import get_model_type_hints
+from litestar.exceptions import ImproperlyConfiguredException
from litestar.types.empty import Empty
from litestar.utils.helpers import get_fully_qualified_class_name
from litestar.utils.signature import ParsedSignature
@@ -83,10 +84,15 @@
default, default_factory = _detect_defaults(elem)
- if (parsed_type := model_type_hints[key]).origin is Mapped:
- (parsed_type,) = parsed_type.inner_types
- else:
- raise NotImplementedError(f"Expected 'Mapped' origin, got: '{parsed_type.origin}'")
+ try:
+ if (parsed_type := model_type_hints[key]).origin is Mapped:
+ (parsed_type,) = parsed_type.inner_types
+ else:
+ raise NotImplementedError(f"Expected 'Mapped' origin, got: '{parsed_type.origin}'")
+ except KeyError as e:
+ raise ImproperlyConfiguredException(
+ f"No type information found for '{orm_descriptor}'. Has a type annotation been added to the column?"
+ ) from e
return [
FieldDefinition(
|
{"golden_diff": "diff --git a/litestar/contrib/sqlalchemy/dto.py b/litestar/contrib/sqlalchemy/dto.py\n--- a/litestar/contrib/sqlalchemy/dto.py\n+++ b/litestar/contrib/sqlalchemy/dto.py\n@@ -20,6 +20,7 @@\n from litestar.dto.factory.data_structures import FieldDefinition\n from litestar.dto.factory.field import DTO_FIELD_META_KEY, DTOField, Mark\n from litestar.dto.factory.utils import get_model_type_hints\n+from litestar.exceptions import ImproperlyConfiguredException\n from litestar.types.empty import Empty\n from litestar.utils.helpers import get_fully_qualified_class_name\n from litestar.utils.signature import ParsedSignature\n@@ -83,10 +84,15 @@\n \n default, default_factory = _detect_defaults(elem)\n \n- if (parsed_type := model_type_hints[key]).origin is Mapped:\n- (parsed_type,) = parsed_type.inner_types\n- else:\n- raise NotImplementedError(f\"Expected 'Mapped' origin, got: '{parsed_type.origin}'\")\n+ try:\n+ if (parsed_type := model_type_hints[key]).origin is Mapped:\n+ (parsed_type,) = parsed_type.inner_types\n+ else:\n+ raise NotImplementedError(f\"Expected 'Mapped' origin, got: '{parsed_type.origin}'\")\n+ except KeyError as e:\n+ raise ImproperlyConfiguredException(\n+ f\"No type information found for '{orm_descriptor}'. Has a type annotation been added to the column?\"\n+ ) from e\n \n return [\n FieldDefinition(\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import singledispatchmethod\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nfrom sqlalchemy import Column, inspect, orm, sql\nfrom sqlalchemy.ext.associationproxy import AssociationProxy, AssociationProxyExtensionType\nfrom sqlalchemy.ext.hybrid import HybridExtensionType, hybrid_property\nfrom sqlalchemy.orm import (\n ColumnProperty,\n DeclarativeBase,\n InspectionAttr,\n Mapped,\n NotExtension,\n QueryableAttribute,\n RelationshipProperty,\n)\n\nfrom litestar.dto.factory.abc import AbstractDTOFactory\nfrom litestar.dto.factory.data_structures import FieldDefinition\nfrom litestar.dto.factory.field import DTO_FIELD_META_KEY, DTOField, Mark\nfrom litestar.dto.factory.utils import get_model_type_hints\nfrom litestar.types.empty import Empty\nfrom litestar.utils.helpers import get_fully_qualified_class_name\nfrom litestar.utils.signature import ParsedSignature\n\nif TYPE_CHECKING:\n from typing import Any, ClassVar, Collection, Generator\n\n from typing_extensions import TypeAlias\n\n from litestar.typing import ParsedType\n\n__all__ = (\"SQLAlchemyDTO\",)\n\nT = TypeVar(\"T\", bound=\"DeclarativeBase | Collection[DeclarativeBase]\")\nElementType: TypeAlias = \"Column[Any] | RelationshipProperty[Any]\"\n\nSQLA_NS = {**vars(orm), **vars(sql)}\n\n\nclass SQLAlchemyDTO(AbstractDTOFactory[T], Generic[T]):\n \"\"\"Support for domain modelling with SQLAlchemy.\"\"\"\n\n __slots__ = ()\n\n model_type: ClassVar[type[DeclarativeBase]]\n\n @singledispatchmethod\n @classmethod\n def handle_orm_descriptor(\n cls,\n extension_type: NotExtension | AssociationProxyExtensionType | HybridExtensionType,\n orm_descriptor: InspectionAttr,\n key: str,\n model_type_hints: dict[str, ParsedType],\n model_name: str,\n ) -> list[FieldDefinition]:\n raise NotImplementedError(f\"Unsupported extension type: {extension_type}\")\n\n @handle_orm_descriptor.register(NotExtension)\n @classmethod\n def _(\n cls,\n extension_type: NotExtension,\n key: str,\n orm_descriptor: InspectionAttr,\n model_type_hints: dict[str, ParsedType],\n model_name: str,\n ) -> list[FieldDefinition]:\n if not isinstance(orm_descriptor, QueryableAttribute):\n raise NotImplementedError(f\"Unexpected descriptor type for '{extension_type}': '{orm_descriptor}'\")\n\n elem: ElementType\n if isinstance(orm_descriptor.property, ColumnProperty):\n if not isinstance(orm_descriptor.property.expression, Column):\n raise NotImplementedError(f\"Expected 'Column', got: '{orm_descriptor.property.expression}'\")\n elem = orm_descriptor.property.expression\n elif isinstance(orm_descriptor.property, RelationshipProperty):\n elem = orm_descriptor.property\n else:\n raise NotImplementedError(f\"Unhandled property type: '{orm_descriptor.property}'\")\n\n default, default_factory = _detect_defaults(elem)\n\n if (parsed_type := model_type_hints[key]).origin is Mapped:\n (parsed_type,) = parsed_type.inner_types\n else:\n raise NotImplementedError(f\"Expected 'Mapped' origin, got: '{parsed_type.origin}'\")\n\n return [\n FieldDefinition(\n name=key,\n default=default,\n parsed_type=parsed_type,\n default_factory=default_factory,\n dto_field=elem.info.get(DTO_FIELD_META_KEY, DTOField()),\n unique_model_name=model_name,\n dto_for=None,\n )\n ]\n\n 
@handle_orm_descriptor.register(AssociationProxyExtensionType)\n @classmethod\n def _(\n cls,\n extension_type: AssociationProxyExtensionType,\n key: str,\n orm_descriptor: InspectionAttr,\n model_type_hints: dict[str, ParsedType],\n model_name: str,\n ) -> list[FieldDefinition]:\n if not isinstance(orm_descriptor, AssociationProxy):\n raise NotImplementedError(f\"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'\")\n\n if (parsed_type := model_type_hints[key]).origin is AssociationProxy:\n (parsed_type,) = parsed_type.inner_types\n else:\n raise NotImplementedError(f\"Expected 'AssociationProxy' origin, got: '{parsed_type.origin}'\")\n\n return [\n FieldDefinition(\n name=key,\n default=Empty,\n parsed_type=parsed_type,\n default_factory=None,\n dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)),\n unique_model_name=model_name,\n dto_for=None,\n )\n ]\n\n @handle_orm_descriptor.register(HybridExtensionType)\n @classmethod\n def _(\n cls,\n extension_type: HybridExtensionType,\n key: str,\n orm_descriptor: InspectionAttr,\n model_type_hints: dict[str, ParsedType],\n model_name: str,\n ) -> list[FieldDefinition]:\n if not isinstance(orm_descriptor, hybrid_property):\n raise NotImplementedError(f\"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'\")\n\n getter_sig = ParsedSignature.from_fn(orm_descriptor.fget, {})\n\n field_defs = [\n FieldDefinition(\n name=orm_descriptor.__name__,\n default=Empty,\n parsed_type=getter_sig.return_type,\n default_factory=None,\n dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)),\n unique_model_name=model_name,\n dto_for=\"return\",\n )\n ]\n\n if orm_descriptor.fset is not None:\n setter_sig = ParsedSignature.from_fn(orm_descriptor.fset, {})\n field_defs.append(\n FieldDefinition(\n name=orm_descriptor.__name__,\n default=Empty,\n parsed_type=next(iter(setter_sig.parameters.values())).parsed_type,\n default_factory=None,\n dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.WRITE_ONLY)),\n unique_model_name=model_name,\n dto_for=\"data\",\n )\n )\n\n return field_defs\n\n @classmethod\n def generate_field_definitions(cls, model_type: type[DeclarativeBase]) -> Generator[FieldDefinition, None, None]:\n if (mapper := inspect(model_type)) is None: # pragma: no cover\n raise RuntimeError(\"Unexpected `None` value for mapper.\")\n\n # includes SQLAlchemy names and other mapped class names in the forward reference resolution namespace\n namespace = {**SQLA_NS, **{m.class_.__name__: m.class_ for m in mapper.registry.mappers if m is not mapper}}\n model_type_hints = get_model_type_hints(model_type, namespace=namespace)\n model_name = get_fully_qualified_class_name(model_type)\n\n # the same hybrid property descriptor can be included in `all_orm_descriptors` multiple times, once\n # for each method name it is bound to. 
We only need to see it once, so track views of it here.\n seen_hybrid_descriptors: set[hybrid_property] = set()\n for key, orm_descriptor in mapper.all_orm_descriptors.items():\n if isinstance(orm_descriptor, hybrid_property):\n if orm_descriptor in seen_hybrid_descriptors:\n continue\n seen_hybrid_descriptors.add(orm_descriptor)\n\n yield from cls.handle_orm_descriptor(\n orm_descriptor.extension_type, key, orm_descriptor, model_type_hints, model_name\n )\n\n @classmethod\n def detect_nested_field(cls, parsed_type: ParsedType) -> bool:\n return parsed_type.is_subclass_of(DeclarativeBase)\n\n\ndef _detect_defaults(elem: ElementType) -> tuple[Any, Any]:\n default: Any = Empty\n default_factory: Any = None # pyright:ignore\n if sqla_default := getattr(elem, \"default\", None):\n if sqla_default.is_scalar:\n default = sqla_default.arg\n elif sqla_default.is_callable:\n\n def default_factory(d: Any = sqla_default) -> Any:\n return d.arg({})\n\n else:\n raise ValueError(\"Unexpected default type\")\n else:\n if getattr(elem, \"nullable\", False):\n default = None\n return default, default_factory\n", "path": "litestar/contrib/sqlalchemy/dto.py"}]}
| 3,000 | 342 |
gh_patches_debug_19003
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-5302
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_OPENAPI_20 incorrectly flags API keys via HTTPS
**Describe the issue**
#5253 added CKV_OPENAPI_20 with the message "Ensure that API keys are not sent over cleartext", but the [check](https://github.com/bridgecrewio/checkov/blob/main/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py) does not check the API's supported schemes.
If the intent of this check is to prevent cleartext use of API keys, then if the root level [`schemes`](https://swagger.io/docs/specification/2-0/api-host-and-base-path/) key in OpenAPI 2.0 or [`servers`](https://swagger.io/docs/specification/api-host-and-base-path/) key in OpenAPI 3.0 specifies only `https` (2.0) or only `url`s which are HTTPS (3.0), this check should pass.
**Examples**
[fail2.json](https://github.com/bridgecrewio/checkov/blob/main/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail2.json) and its YAML counterpart should fail, but if they specified `"schemes": ["https"]`, they should not.
Ditto for the OpenAPI 3.0 version of this example.
**Version (please complete the following information):**
2.3.312
**Additional context**
It may be that the message is wrong and you actually intend to flag all use of API keys, but if that's the case, the message should convey that. I also would argue that header API keys should not be scrutinized the same way as query parameter API keys, since the risk of leaking the API key unintentionally is higher with the latter.
</issue>
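To illustrate the report, here is the shape of an OpenAPI 2.0 document (expressed as a Python dict for brevity) that declares and uses an API key but only offers HTTPS, together with a small helper showing the scheme test the reporter is asking for. Both are illustrative only; the helper is a simplification and is not the wording of the actual Checkov change.

```python
# Hypothetical OpenAPI 2.0 spec: an apiKey scheme is defined and used,
# but the only declared transport scheme is HTTPS.
spec = {
    "swagger": "2.0",
    "schemes": ["https"],
    "securityDefinitions": {
        "api_key": {"type": "apiKey", "name": "X-API-Key", "in": "header"},
    },
    "paths": {
        "/items": {"get": {"security": [{"api_key": []}]}},
    },
}

def allows_cleartext(conf: dict) -> bool:
    # Only treat the document as risky when a non-HTTPS scheme is (or may be) offered.
    schemes = conf.get("schemes")
    if not schemes:
        return True  # nothing declared, stay conservative
    return any(s != "https" for s in schemes)

print(allows_cleartext(spec))  # False -> CKV_OPENAPI_20 should pass this document
```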
<code>
[start of checkov/openapi/checks/resource/generic/ClearTextAPIKey.py]
1 from __future__ import annotations
2
3 from typing import Any
4 from checkov.common.models.enums import CheckResult, CheckCategories
5 from checkov.common.checks.enums import BlockType
6 from checkov.common.util.consts import LINE_FIELD_NAMES
7 from checkov.openapi.checks.base_openapi_check import BaseOpenapiCheck
8
9
10 class ClearTestAPIKey(BaseOpenapiCheck):
11 def __init__(self) -> None:
12 id = "CKV_OPENAPI_20"
13 name = "Ensure that API keys are not sent over cleartext"
14 categories = (CheckCategories.API_SECURITY,)
15 supported_resources = ('paths',)
16 super().__init__(name=name, id=id, categories=categories, supported_entities=supported_resources,
17 block_type=BlockType.DOCUMENT)
18
19 def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class
20 components = conf.get("components")
21 security_def = conf.get("securityDefinitions")
22 if components and isinstance(components, dict):
23 security_schemes = components.get("securitySchemes") or {}
24 elif security_def:
25 security_schemes = security_def
26 else:
27 return CheckResult.PASSED, conf
28
29 paths = conf.get('paths')
30 if not isinstance(paths, dict):
31 return CheckResult.PASSED, security_schemes
32
33 filtered_dict = {}
34 if isinstance(security_schemes, dict):
35 for name, scheme in security_schemes.items():
36 if isinstance(scheme, dict) and scheme.get('type') == "apiKey":
37 filtered_dict[name] = scheme
38
39 if not filtered_dict:
40 return CheckResult.PASSED, security_schemes
41
42 for key, path in paths.items():
43 if not path:
44 continue
45 if key in LINE_FIELD_NAMES:
46 continue
47 for value in path.values():
48 if not isinstance(value, dict):
49 continue
50 operation_security = value.get('security')
51 if operation_security and isinstance(operation_security, list):
52 for sec in operation_security[0]:
53 if sec in filtered_dict:
54 return CheckResult.FAILED, security_schemes
55
56 return CheckResult.PASSED, conf
57
58
59 check = ClearTestAPIKey()
60
[end of checkov/openapi/checks/resource/generic/ClearTextAPIKey.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
--- a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
+++ b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
@@ -17,6 +17,16 @@
block_type=BlockType.DOCUMENT)
def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class
+ schemes = conf.get("schemes")
+ if schemes and isinstance(schemes, list):
+ if "http" not in schemes and "wp" not in schemes:
+ return CheckResult.PASSED, conf
+
+ servers = conf.get("servers")
+ if servers and isinstance(servers, list):
+ if not any(server['url'].startswith('http://') for server in servers):
+ return CheckResult.PASSED, conf
+
components = conf.get("components")
security_def = conf.get("securityDefinitions")
if components and isinstance(components, dict):
|
{"golden_diff": "diff --git a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py\n--- a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py\n+++ b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py\n@@ -17,6 +17,16 @@\n block_type=BlockType.DOCUMENT)\n \n def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class\n+ schemes = conf.get(\"schemes\")\n+ if schemes and isinstance(schemes, list):\n+ if \"http\" not in schemes and \"wp\" not in schemes:\n+ return CheckResult.PASSED, conf\n+\n+ servers = conf.get(\"servers\")\n+ if servers and isinstance(servers, list):\n+ if not any(server['url'].startswith('http://') for server in servers):\n+ return CheckResult.PASSED, conf\n+\n components = conf.get(\"components\")\n security_def = conf.get(\"securityDefinitions\")\n if components and isinstance(components, dict):\n", "issue": "CKV_OPENAPI_20 incorrectly flags API keys via HTTPS\n**Describe the issue**\r\n\r\n#5253 added CKV_OPENAPI_20 with the message \"Ensure that API keys are not sent over cleartext\", but the [check](https://github.com/bridgecrewio/checkov/blob/main/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py) does not check the API's supported schemes.\r\n\r\nIf the intent of this check is to prevent cleartext use of API keys, then if the root level [`schemes`](https://swagger.io/docs/specification/2-0/api-host-and-base-path/) key in OpenAPI 2.0 or [`servers`](https://swagger.io/docs/specification/api-host-and-base-path/) key in OpenAPI 3.0 specifies only `https` (2.0) or only `url`s which are HTTPS (3.0), this check should pass.\r\n\r\n**Examples**\r\n\r\n[fail2.json](https://github.com/bridgecrewio/checkov/blob/main/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail2.json) and its YAML counterpart should fail, but if they specified `\"schemes\": [\"https\"]`, they should not.\r\n\r\nDitto for the OpenAPI 3.0 version of this example.\r\n\r\n**Version (please complete the following information):**\r\n\r\n2.3.312\r\n\r\n**Additional context**\r\n\r\nIt may be that the message is wrong and you actually intend to flag all use of API keys, but if that's the case, the message should convey that. 
I also would argue that header API keys should not be scrutinized the same way as query parameter API keys, since the risk of leaking the API key unintentionally is higher with the latter.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.checks.enums import BlockType\nfrom checkov.common.util.consts import LINE_FIELD_NAMES\nfrom checkov.openapi.checks.base_openapi_check import BaseOpenapiCheck\n\n\nclass ClearTestAPIKey(BaseOpenapiCheck):\n def __init__(self) -> None:\n id = \"CKV_OPENAPI_20\"\n name = \"Ensure that API keys are not sent over cleartext\"\n categories = (CheckCategories.API_SECURITY,)\n supported_resources = ('paths',)\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_resources,\n block_type=BlockType.DOCUMENT)\n\n def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class\n components = conf.get(\"components\")\n security_def = conf.get(\"securityDefinitions\")\n if components and isinstance(components, dict):\n security_schemes = components.get(\"securitySchemes\") or {}\n elif security_def:\n security_schemes = security_def\n else:\n return CheckResult.PASSED, conf\n\n paths = conf.get('paths')\n if not isinstance(paths, dict):\n return CheckResult.PASSED, security_schemes\n\n filtered_dict = {}\n if isinstance(security_schemes, dict):\n for name, scheme in security_schemes.items():\n if isinstance(scheme, dict) and scheme.get('type') == \"apiKey\":\n filtered_dict[name] = scheme\n\n if not filtered_dict:\n return CheckResult.PASSED, security_schemes\n\n for key, path in paths.items():\n if not path:\n continue\n if key in LINE_FIELD_NAMES:\n continue\n for value in path.values():\n if not isinstance(value, dict):\n continue\n operation_security = value.get('security')\n if operation_security and isinstance(operation_security, list):\n for sec in operation_security[0]:\n if sec in filtered_dict:\n return CheckResult.FAILED, security_schemes\n\n return CheckResult.PASSED, conf\n\n\ncheck = ClearTestAPIKey()\n", "path": "checkov/openapi/checks/resource/generic/ClearTextAPIKey.py"}]}
| 1,528 | 273 |
gh_patches_debug_4711
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4323
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mac OS X, OS X → macOS
We have a few references in the documentation where we use the old name of that OS. We should update them.
</issue>
<code>
[start of scrapy/extensions/memusage.py]
1 """
2 MemoryUsage extension
3
4 See documentation in docs/topics/extensions.rst
5 """
6 import sys
7 import socket
8 import logging
9 from pprint import pformat
10 from importlib import import_module
11
12 from twisted.internet import task
13
14 from scrapy import signals
15 from scrapy.exceptions import NotConfigured
16 from scrapy.mail import MailSender
17 from scrapy.utils.engine import get_engine_status
18
19 logger = logging.getLogger(__name__)
20
21
22 class MemoryUsage(object):
23
24 def __init__(self, crawler):
25 if not crawler.settings.getbool('MEMUSAGE_ENABLED'):
26 raise NotConfigured
27 try:
28 # stdlib's resource module is only available on unix platforms.
29 self.resource = import_module('resource')
30 except ImportError:
31 raise NotConfigured
32
33 self.crawler = crawler
34 self.warned = False
35 self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')
36 self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB')*1024*1024
37 self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB')*1024*1024
38 self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')
39 self.mail = MailSender.from_settings(crawler.settings)
40 crawler.signals.connect(self.engine_started, signal=signals.engine_started)
41 crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)
42
43 @classmethod
44 def from_crawler(cls, crawler):
45 return cls(crawler)
46
47 def get_virtual_size(self):
48 size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss
49 if sys.platform != 'darwin':
50 # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB
51 size *= 1024
52 return size
53
54 def engine_started(self):
55 self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())
56 self.tasks = []
57 tsk = task.LoopingCall(self.update)
58 self.tasks.append(tsk)
59 tsk.start(self.check_interval, now=True)
60 if self.limit:
61 tsk = task.LoopingCall(self._check_limit)
62 self.tasks.append(tsk)
63 tsk.start(self.check_interval, now=True)
64 if self.warning:
65 tsk = task.LoopingCall(self._check_warning)
66 self.tasks.append(tsk)
67 tsk.start(self.check_interval, now=True)
68
69 def engine_stopped(self):
70 for tsk in self.tasks:
71 if tsk.running:
72 tsk.stop()
73
74 def update(self):
75 self.crawler.stats.max_value('memusage/max', self.get_virtual_size())
76
77 def _check_limit(self):
78 if self.get_virtual_size() > self.limit:
79 self.crawler.stats.set_value('memusage/limit_reached', 1)
80 mem = self.limit/1024/1024
81 logger.error("Memory usage exceeded %(memusage)dM. Shutting down Scrapy...",
82 {'memusage': mem}, extra={'crawler': self.crawler})
83 if self.notify_mails:
84 subj = "%s terminated: memory usage exceeded %dM at %s" % \
85 (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())
86 self._send_report(self.notify_mails, subj)
87 self.crawler.stats.set_value('memusage/limit_notified', 1)
88
89 open_spiders = self.crawler.engine.open_spiders
90 if open_spiders:
91 for spider in open_spiders:
92 self.crawler.engine.close_spider(spider, 'memusage_exceeded')
93 else:
94 self.crawler.stop()
95
96 def _check_warning(self):
97 if self.warned: # warn only once
98 return
99 if self.get_virtual_size() > self.warning:
100 self.crawler.stats.set_value('memusage/warning_reached', 1)
101 mem = self.warning/1024/1024
102 logger.warning("Memory usage reached %(memusage)dM",
103 {'memusage': mem}, extra={'crawler': self.crawler})
104 if self.notify_mails:
105 subj = "%s warning: memory usage reached %dM at %s" % \
106 (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())
107 self._send_report(self.notify_mails, subj)
108 self.crawler.stats.set_value('memusage/warning_notified', 1)
109 self.warned = True
110
111 def _send_report(self, rcpts, subject):
112 """send notification mail with some additional useful info"""
113 stats = self.crawler.stats
114 s = "Memory usage at engine startup : %dM\r\n" % (stats.get_value('memusage/startup')/1024/1024)
115 s += "Maximum memory usage : %dM\r\n" % (stats.get_value('memusage/max')/1024/1024)
116 s += "Current memory usage : %dM\r\n" % (self.get_virtual_size()/1024/1024)
117
118 s += "ENGINE STATUS ------------------------------------------------------- \r\n"
119 s += "\r\n"
120 s += pformat(get_engine_status(self.crawler.engine))
121 s += "\r\n"
122 self.mail.send(rcpts, subject, s)
123
[end of scrapy/extensions/memusage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/extensions/memusage.py b/scrapy/extensions/memusage.py
--- a/scrapy/extensions/memusage.py
+++ b/scrapy/extensions/memusage.py
@@ -47,7 +47,7 @@
def get_virtual_size(self):
size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss
if sys.platform != 'darwin':
- # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB
+ # on macOS ru_maxrss is in bytes, on Linux it is in KB
size *= 1024
return size
|
{"golden_diff": "diff --git a/scrapy/extensions/memusage.py b/scrapy/extensions/memusage.py\n--- a/scrapy/extensions/memusage.py\n+++ b/scrapy/extensions/memusage.py\n@@ -47,7 +47,7 @@\n def get_virtual_size(self):\n size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss\n if sys.platform != 'darwin':\n- # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB\n+ # on macOS ru_maxrss is in bytes, on Linux it is in KB\n size *= 1024\n return size\n", "issue": "Mac OS X, OS X \u2192 macOS\nWe have a few references in the documentation where we use the old name of that OS. We should update them.\n", "before_files": [{"content": "\"\"\"\nMemoryUsage extension\n\nSee documentation in docs/topics/extensions.rst\n\"\"\"\nimport sys\nimport socket\nimport logging\nfrom pprint import pformat\nfrom importlib import import_module\n\nfrom twisted.internet import task\n\nfrom scrapy import signals\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.mail import MailSender\nfrom scrapy.utils.engine import get_engine_status\n\nlogger = logging.getLogger(__name__)\n\n\nclass MemoryUsage(object):\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('MEMUSAGE_ENABLED'):\n raise NotConfigured\n try:\n # stdlib's resource module is only available on unix platforms.\n self.resource = import_module('resource')\n except ImportError:\n raise NotConfigured\n\n self.crawler = crawler\n self.warned = False\n self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')\n self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB')*1024*1024\n self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB')*1024*1024\n self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')\n self.mail = MailSender.from_settings(crawler.settings)\n crawler.signals.connect(self.engine_started, signal=signals.engine_started)\n crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def get_virtual_size(self):\n size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss\n if sys.platform != 'darwin':\n # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB\n size *= 1024\n return size\n\n def engine_started(self):\n self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())\n self.tasks = []\n tsk = task.LoopingCall(self.update)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.limit:\n tsk = task.LoopingCall(self._check_limit)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.warning:\n tsk = task.LoopingCall(self._check_warning)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n\n def engine_stopped(self):\n for tsk in self.tasks:\n if tsk.running:\n tsk.stop()\n\n def update(self):\n self.crawler.stats.max_value('memusage/max', self.get_virtual_size())\n\n def _check_limit(self):\n if self.get_virtual_size() > self.limit:\n self.crawler.stats.set_value('memusage/limit_reached', 1)\n mem = self.limit/1024/1024\n logger.error(\"Memory usage exceeded %(memusage)dM. 
Shutting down Scrapy...\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = \"%s terminated: memory usage exceeded %dM at %s\" % \\\n (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/limit_notified', 1)\n\n open_spiders = self.crawler.engine.open_spiders\n if open_spiders:\n for spider in open_spiders:\n self.crawler.engine.close_spider(spider, 'memusage_exceeded')\n else:\n self.crawler.stop()\n\n def _check_warning(self):\n if self.warned: # warn only once\n return\n if self.get_virtual_size() > self.warning:\n self.crawler.stats.set_value('memusage/warning_reached', 1)\n mem = self.warning/1024/1024\n logger.warning(\"Memory usage reached %(memusage)dM\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = \"%s warning: memory usage reached %dM at %s\" % \\\n (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/warning_notified', 1)\n self.warned = True\n\n def _send_report(self, rcpts, subject):\n \"\"\"send notification mail with some additional useful info\"\"\"\n stats = self.crawler.stats\n s = \"Memory usage at engine startup : %dM\\r\\n\" % (stats.get_value('memusage/startup')/1024/1024)\n s += \"Maximum memory usage : %dM\\r\\n\" % (stats.get_value('memusage/max')/1024/1024)\n s += \"Current memory usage : %dM\\r\\n\" % (self.get_virtual_size()/1024/1024)\n\n s += \"ENGINE STATUS ------------------------------------------------------- \\r\\n\"\n s += \"\\r\\n\"\n s += pformat(get_engine_status(self.crawler.engine))\n s += \"\\r\\n\"\n self.mail.send(rcpts, subject, s)\n", "path": "scrapy/extensions/memusage.py"}]}
| 1,967 | 138 |
gh_patches_debug_43040
|
rasdani/github-patches
|
git_diff
|
PrefectHQ__prefect-6999
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `prefect cloud logout` command
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar request and didn't find it.
- [X] I searched the Prefect documentation for this feature.
### Prefect Version
2.x
### Describe the current behavior
Add `prefect cloud logout` to log out of a workspace
### Describe the proposed behavior
Likely by unsetting `PREFECT_API_KEY` and `PREFECT_API_URL` so they fall back to their defaults
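
For illustration, a minimal sketch of what such a command might look like, reusing the profile helpers the other commands in `src/prefect/cli/cloud.py` already rely on (`update_current_profile`, `exit_with_error`, `exit_with_success`). This is a sketch of the idea, not a final implementation:

```python
# Sketch only: assumes the cloud_app Typer group and the CLI helpers defined
# in src/prefect/cli/cloud.py. Setting a value to None removes it from the
# profile, so the setting falls back to its default.
import prefect.context
from prefect.settings import PREFECT_API_KEY, PREFECT_API_URL, update_current_profile


@cloud_app.command()
async def logout():
    """Log out of Prefect Cloud in the current profile."""
    profile = prefect.context.get_settings_context().profile
    if profile.settings.get(PREFECT_API_KEY) is None:
        exit_with_error("Current profile is not logged into Prefect Cloud.")

    update_current_profile({PREFECT_API_URL: None, PREFECT_API_KEY: None})
    exit_with_success("Logged out of Prefect Cloud.")
```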
### Example Use
_No response_
### Additional context
_No response_
</issue>
<code>
[start of src/prefect/cli/cloud.py]
1 """
2 Command line interface for interacting with Prefect Cloud
3 """
4 import re
5 from typing import Dict, Iterable
6
7 import httpx
8 import readchar
9 import typer
10 from rich.live import Live
11 from rich.table import Table
12
13 import prefect.context
14 import prefect.settings
15 from prefect.cli._types import PrefectTyper
16 from prefect.cli._utilities import exit_with_error, exit_with_success
17 from prefect.cli.root import app
18 from prefect.client.cloud import CloudUnauthorizedError, get_cloud_client
19 from prefect.settings import (
20 PREFECT_API_KEY,
21 PREFECT_API_URL,
22 PREFECT_CLOUD_URL,
23 load_profiles,
24 save_profiles,
25 update_current_profile,
26 )
27
28 cloud_app = PrefectTyper(
29 name="cloud", help="Commands for interacting with Prefect Cloud"
30 )
31 workspace_app = PrefectTyper(
32 name="workspace", help="Commands for interacting with Prefect Cloud Workspaces"
33 )
34 cloud_app.add_typer(workspace_app, aliases=["workspaces"])
35 app.add_typer(cloud_app)
36
37
38 def build_url_from_workspace(workspace: Dict) -> str:
39 return (
40 f"{PREFECT_CLOUD_URL.value()}"
41 f"/accounts/{workspace['account_id']}"
42 f"/workspaces/{workspace['workspace_id']}"
43 )
44
45
46 def confirm_logged_in():
47 if not PREFECT_API_KEY:
48 profile = prefect.context.get_settings_context().profile
49 exit_with_error(
50 f"Currently not authenticated in profile {profile.name!r}. "
51 "Please login with `prefect cloud login --key <API_KEY>`."
52 )
53
54
55 def get_current_workspace(workspaces):
56 workspace_handles_by_id = {
57 workspace[
58 "workspace_id"
59 ]: f"{workspace['account_handle']}/{workspace['workspace_handle']}"
60 for workspace in workspaces
61 }
62 current_workspace_id = re.match(
63 r".*accounts/.{36}/workspaces/(.{36})\Z", PREFECT_API_URL.value()
64 ).groups()[0]
65 return workspace_handles_by_id[current_workspace_id]
66
67
68 def build_table(selected_idx: int, workspaces: Iterable[str]) -> Table:
69 """
70 Generate a table of workspaces. The `select_idx` of workspaces will be highlighted.
71
72 Args:
73 selected_idx: currently selected index
74 workspaces: Iterable of strings
75
76 Returns:
77 rich.table.Table
78 """
79
80 table = Table()
81 table.add_column(
82 "[#024dfd]Select a Workspace:", justify="right", style="#8ea0ae", no_wrap=True
83 )
84
85 for i, workspace in enumerate(sorted(workspaces)):
86 if i == selected_idx:
87 table.add_row("[#024dfd on #FFFFFF]> " + workspace)
88 else:
89 table.add_row(" " + workspace)
90 return table
91
92
93 def select_workspace(workspaces: Iterable[str]) -> str:
94 """
95 Given a list of workspaces, display them to user in a Table
96 and allow them to select one.
97
98 Args:
99 workspaces: List of workspaces to choose from
100
101 Returns:
102 str: the selected workspace
103 """
104
105 workspaces = sorted(workspaces)
106 current_idx = 0
107 selected_workspace = None
108
109 with Live(
110 build_table(current_idx, workspaces), auto_refresh=False, console=app.console
111 ) as live:
112 while selected_workspace is None:
113 key = readchar.readkey()
114
115 if key == readchar.key.UP:
116 current_idx = current_idx - 1
117 # wrap to bottom if at the top
118 if current_idx < 0:
119 current_idx = len(workspaces) - 1
120 elif key == readchar.key.DOWN:
121 current_idx = current_idx + 1
122 # wrap to top if at the bottom
123 if current_idx >= len(workspaces):
124 current_idx = 0
125 elif key == readchar.key.CTRL_C:
126 # gracefully exit with no message
127 exit_with_error("")
128 elif key == readchar.key.ENTER:
129 selected_workspace = workspaces[current_idx]
130
131 live.update(build_table(current_idx, workspaces), refresh=True)
132
133 return selected_workspace
134
135
136 @cloud_app.command()
137 async def login(
138 key: str = typer.Option(
139 ..., "--key", "-k", help="API Key to authenticate with Prefect", prompt=True
140 ),
141 workspace_handle: str = typer.Option(
142 None,
143 "--workspace",
144 "-w",
145 help="Full handle of workspace, in format '<account_handle>/<workspace_handle>'",
146 ),
147 ):
148 """
149 Log in to Prefect Cloud.
150 Creates a new profile configured to use the specified PREFECT_API_KEY.
151 Uses a previously configured profile if it exists.
152 """
153 profiles = load_profiles()
154
155 async with get_cloud_client(api_key=key) as client:
156 try:
157 workspaces = await client.read_workspaces()
158 workspace_handle_details = {
159 f"{workspace['account_handle']}/{workspace['workspace_handle']}": workspace
160 for workspace in workspaces
161 }
162 except CloudUnauthorizedError:
163 if key.startswith("pcu"):
164 help_message = "It looks like you're using API key from Cloud 1 (https://cloud.prefect.io). Make sure that you generate API key using Cloud 2 (https://app.prefect.cloud)"
165 elif not key.startswith("pnu"):
166 help_message = "Your key is not in our expected format."
167 else:
168 help_message = "Please ensure your credentials are correct."
169 exit_with_error(
170 f"Unable to authenticate with Prefect Cloud. {help_message}"
171 )
172 except httpx.HTTPStatusError as exc:
173 exit_with_error(f"Error connecting to Prefect Cloud: {exc!r}")
174
175 for profile_name in profiles:
176 if key == profiles[profile_name].settings.get(PREFECT_API_KEY):
177 profiles.set_active(profile_name)
178 save_profiles(profiles)
179 with prefect.context.use_profile(profile_name):
180 current_workspace = get_current_workspace(workspaces)
181
182 if workspace_handle is not None:
183 if workspace_handle not in workspace_handle_details:
184 exit_with_error(f"Workspace {workspace_handle!r} not found.")
185
186 update_current_profile(
187 {
188 PREFECT_API_URL: build_url_from_workspace(
189 workspace_handle_details[workspace_handle]
190 )
191 }
192 )
193 current_workspace = workspace_handle
194
195 exit_with_success(
196 f"Logged in to Prefect Cloud using profile {profile_name!r}.\n"
197 f"Workspace is currently set to {current_workspace!r}. "
198 f"The workspace can be changed using `prefect cloud workspace set`."
199 )
200
201 workspace_handle_details = {
202 f"{workspace['account_handle']}/{workspace['workspace_handle']}": workspace
203 for workspace in workspaces
204 }
205
206 if not workspace_handle:
207 workspace_handle = select_workspace(workspace_handle_details.keys())
208
209 cloud_profile_name = app.console.input(
210 "Creating a profile for this Prefect Cloud login. Please specify a profile name: "
211 )
212
213 cloud_profile_name = cloud_profile_name.strip()
214 if cloud_profile_name == "":
215 exit_with_error("Please provide a non-empty profile name.")
216
217 if cloud_profile_name in profiles:
218 exit_with_error(f"Profile {cloud_profile_name!r} already exists.")
219
220 profiles.add_profile(
221 profiles[profiles.active_name].copy(
222 update={
223 "name": cloud_profile_name,
224 }
225 )
226 )
227
228 profiles.update_profile(
229 cloud_profile_name,
230 {
231 PREFECT_API_URL: build_url_from_workspace(
232 workspace_handle_details[workspace_handle]
233 ),
234 PREFECT_API_KEY: key,
235 },
236 )
237
238 profiles.set_active(cloud_profile_name)
239 save_profiles(profiles)
240
241 exit_with_success(
242 f"Logged in to Prefect Cloud using profile {cloud_profile_name!r}.\n"
243 f"Workspace is currently set to {workspace_handle!r}. "
244 f"The workspace can be changed using `prefect cloud workspace set`."
245 )
246
247
248 @workspace_app.command()
249 async def ls():
250 """List available workspaces."""
251
252 confirm_logged_in()
253
254 async with get_cloud_client() as client:
255 try:
256 workspaces = await client.read_workspaces()
257 except CloudUnauthorizedError:
258 exit_with_error(
259 "Unable to authenticate. Please ensure your credentials are correct."
260 )
261
262 workspace_handle_details = {
263 f"{workspace['account_handle']}/{workspace['workspace_handle']}": workspace
264 for workspace in workspaces
265 }
266
267 current_workspace = get_current_workspace(workspaces)
268
269 table = Table(caption="* active workspace")
270 table.add_column(
271 "[#024dfd]Available Workspaces:", justify="right", style="#8ea0ae", no_wrap=True
272 )
273
274 for i, workspace_handle in enumerate(sorted(workspace_handle_details)):
275 if workspace_handle == current_workspace:
276 table.add_row(f"[green] * {workspace_handle}[/green]")
277 else:
278 table.add_row(f" {workspace_handle}")
279 app.console.print(table)
280
281
282 @workspace_app.command()
283 async def set(
284 workspace_handle: str = typer.Option(
285 None,
286 "--workspace",
287 "-w",
288 help="Full handle of workspace, in format '<account_handle>/<workspace_handle>'",
289 ),
290 ):
291 """Set current workspace. Shows a workspace picker if no workspace is specified."""
292 confirm_logged_in()
293
294 async with get_cloud_client() as client:
295 try:
296 workspaces = await client.read_workspaces()
297 except CloudUnauthorizedError:
298 exit_with_error(
299 "Unable to authenticate. Please ensure your credentials are correct."
300 )
301 workspaces = {
302 f"{workspace['account_handle']}/{workspace['workspace_handle']}": workspace
303 for workspace in workspaces
304 }
305
306 if not workspace_handle:
307 workspace_handle = select_workspace(workspaces)
308
309 if workspace_handle not in workspaces:
310 exit_with_error(
311 f"Workspace {workspace_handle!r} not found. "
312 "Leave `--workspace` blank to select a workspace."
313 )
314
315 profile = update_current_profile(
316 {PREFECT_API_URL: build_url_from_workspace(workspaces[workspace_handle])}
317 )
318
319 exit_with_success(
320 f"Successfully set workspace to {workspace_handle!r} in profile {profile.name!r}."
321 )
322
[end of src/prefect/cli/cloud.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/prefect/cli/cloud.py b/src/prefect/cli/cloud.py
--- a/src/prefect/cli/cloud.py
+++ b/src/prefect/cli/cloud.py
@@ -150,8 +150,6 @@
Creates a new profile configured to use the specified PREFECT_API_KEY.
Uses a previously configured profile if it exists.
"""
- profiles = load_profiles()
-
async with get_cloud_client(api_key=key) as client:
try:
workspaces = await client.read_workspaces()
@@ -172,6 +170,8 @@
except httpx.HTTPStatusError as exc:
exit_with_error(f"Error connecting to Prefect Cloud: {exc!r}")
+ profiles = load_profiles()
+
for profile_name in profiles:
if key == profiles[profile_name].settings.get(PREFECT_API_KEY):
profiles.set_active(profile_name)
@@ -192,41 +192,19 @@
)
current_workspace = workspace_handle
- exit_with_success(
- f"Logged in to Prefect Cloud using profile {profile_name!r}.\n"
- f"Workspace is currently set to {current_workspace!r}. "
- f"The workspace can be changed using `prefect cloud workspace set`."
- )
-
- workspace_handle_details = {
- f"{workspace['account_handle']}/{workspace['workspace_handle']}": workspace
- for workspace in workspaces
- }
+ exit_with_success(
+ f"Logged in to Prefect Cloud using profile {profile_name!r}.\n"
+ f"Workspace is currently set to {current_workspace!r}. "
+ f"The workspace can be changed using `prefect cloud workspace set`."
+ )
if not workspace_handle:
workspace_handle = select_workspace(workspace_handle_details.keys())
- cloud_profile_name = app.console.input(
- "Creating a profile for this Prefect Cloud login. Please specify a profile name: "
- )
-
- cloud_profile_name = cloud_profile_name.strip()
- if cloud_profile_name == "":
- exit_with_error("Please provide a non-empty profile name.")
+ if workspace_handle not in workspace_handle_details:
+ exit_with_error(f"Workspace {workspace_handle!r} not found.")
- if cloud_profile_name in profiles:
- exit_with_error(f"Profile {cloud_profile_name!r} already exists.")
-
- profiles.add_profile(
- profiles[profiles.active_name].copy(
- update={
- "name": cloud_profile_name,
- }
- )
- )
-
- profiles.update_profile(
- cloud_profile_name,
+ current_profile = update_current_profile(
{
PREFECT_API_URL: build_url_from_workspace(
workspace_handle_details[workspace_handle]
@@ -235,16 +213,36 @@
},
)
- profiles.set_active(cloud_profile_name)
- save_profiles(profiles)
-
exit_with_success(
- f"Logged in to Prefect Cloud using profile {cloud_profile_name!r}.\n"
+ f"Logged in to Prefect Cloud using profile {current_profile.name!r}.\n"
f"Workspace is currently set to {workspace_handle!r}. "
f"The workspace can be changed using `prefect cloud workspace set`."
)
+@cloud_app.command()
+async def logout():
+ """
+ Logout the current workspace.
+ Reset PREFECT_API_KEY and PREFECT_API_URL to default.
+ """
+ current_profile = prefect.context.get_settings_context().profile
+ if current_profile is None:
+ exit_with_error("There is no current profile set.")
+
+ if current_profile.settings.get(PREFECT_API_KEY) is None:
+ exit_with_error("Current profile is not logged into Prefect Cloud.")
+
+ update_current_profile(
+ {
+ PREFECT_API_URL: None,
+ PREFECT_API_KEY: None,
+ },
+ )
+
+ exit_with_success("Logged out from Prefect Cloud.")
+
+
@workspace_app.command()
async def ls():
"""List available workspaces."""
|
{"golden_diff": "diff --git a/src/prefect/cli/cloud.py b/src/prefect/cli/cloud.py\n--- a/src/prefect/cli/cloud.py\n+++ b/src/prefect/cli/cloud.py\n@@ -150,8 +150,6 @@\n Creates a new profile configured to use the specified PREFECT_API_KEY.\n Uses a previously configured profile if it exists.\n \"\"\"\n- profiles = load_profiles()\n-\n async with get_cloud_client(api_key=key) as client:\n try:\n workspaces = await client.read_workspaces()\n@@ -172,6 +170,8 @@\n except httpx.HTTPStatusError as exc:\n exit_with_error(f\"Error connecting to Prefect Cloud: {exc!r}\")\n \n+ profiles = load_profiles()\n+\n for profile_name in profiles:\n if key == profiles[profile_name].settings.get(PREFECT_API_KEY):\n profiles.set_active(profile_name)\n@@ -192,41 +192,19 @@\n )\n current_workspace = workspace_handle\n \n- exit_with_success(\n- f\"Logged in to Prefect Cloud using profile {profile_name!r}.\\n\"\n- f\"Workspace is currently set to {current_workspace!r}. \"\n- f\"The workspace can be changed using `prefect cloud workspace set`.\"\n- )\n-\n- workspace_handle_details = {\n- f\"{workspace['account_handle']}/{workspace['workspace_handle']}\": workspace\n- for workspace in workspaces\n- }\n+ exit_with_success(\n+ f\"Logged in to Prefect Cloud using profile {profile_name!r}.\\n\"\n+ f\"Workspace is currently set to {current_workspace!r}. \"\n+ f\"The workspace can be changed using `prefect cloud workspace set`.\"\n+ )\n \n if not workspace_handle:\n workspace_handle = select_workspace(workspace_handle_details.keys())\n \n- cloud_profile_name = app.console.input(\n- \"Creating a profile for this Prefect Cloud login. Please specify a profile name: \"\n- )\n-\n- cloud_profile_name = cloud_profile_name.strip()\n- if cloud_profile_name == \"\":\n- exit_with_error(\"Please provide a non-empty profile name.\")\n+ if workspace_handle not in workspace_handle_details:\n+ exit_with_error(f\"Workspace {workspace_handle!r} not found.\")\n \n- if cloud_profile_name in profiles:\n- exit_with_error(f\"Profile {cloud_profile_name!r} already exists.\")\n-\n- profiles.add_profile(\n- profiles[profiles.active_name].copy(\n- update={\n- \"name\": cloud_profile_name,\n- }\n- )\n- )\n-\n- profiles.update_profile(\n- cloud_profile_name,\n+ current_profile = update_current_profile(\n {\n PREFECT_API_URL: build_url_from_workspace(\n workspace_handle_details[workspace_handle]\n@@ -235,16 +213,36 @@\n },\n )\n \n- profiles.set_active(cloud_profile_name)\n- save_profiles(profiles)\n-\n exit_with_success(\n- f\"Logged in to Prefect Cloud using profile {cloud_profile_name!r}.\\n\"\n+ f\"Logged in to Prefect Cloud using profile {current_profile.name!r}.\\n\"\n f\"Workspace is currently set to {workspace_handle!r}. 
\"\n f\"The workspace can be changed using `prefect cloud workspace set`.\"\n )\n \n \n+@cloud_app.command()\n+async def logout():\n+ \"\"\"\n+ Logout the current workspace.\n+ Reset PREFECT_API_KEY and PREFECT_API_URL to default.\n+ \"\"\"\n+ current_profile = prefect.context.get_settings_context().profile\n+ if current_profile is None:\n+ exit_with_error(\"There is no current profile set.\")\n+\n+ if current_profile.settings.get(PREFECT_API_KEY) is None:\n+ exit_with_error(\"Current profile is not logged into Prefect Cloud.\")\n+\n+ update_current_profile(\n+ {\n+ PREFECT_API_URL: None,\n+ PREFECT_API_KEY: None,\n+ },\n+ )\n+\n+ exit_with_success(\"Logged out from Prefect Cloud.\")\n+\n+\n @workspace_app.command()\n async def ls():\n \"\"\"List available workspaces.\"\"\"\n", "issue": "Add `prefect cloud logout` command\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar request and didn't find it.\n- [X] I searched the Prefect documentation for this feature.\n\n### Prefect Version\n\n2.x\n\n### Describe the current behavior\n\nAdd `prefect cloud logout` to log out of a workspace \n\n### Describe the proposed behavior\n\nLikely by unsetting the `PREFECT_API_KEY` and `PREFECT_API_URL` back to default\n\n### Example Use\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nCommand line interface for interacting with Prefect Cloud\n\"\"\"\nimport re\nfrom typing import Dict, Iterable\n\nimport httpx\nimport readchar\nimport typer\nfrom rich.live import Live\nfrom rich.table import Table\n\nimport prefect.context\nimport prefect.settings\nfrom prefect.cli._types import PrefectTyper\nfrom prefect.cli._utilities import exit_with_error, exit_with_success\nfrom prefect.cli.root import app\nfrom prefect.client.cloud import CloudUnauthorizedError, get_cloud_client\nfrom prefect.settings import (\n PREFECT_API_KEY,\n PREFECT_API_URL,\n PREFECT_CLOUD_URL,\n load_profiles,\n save_profiles,\n update_current_profile,\n)\n\ncloud_app = PrefectTyper(\n name=\"cloud\", help=\"Commands for interacting with Prefect Cloud\"\n)\nworkspace_app = PrefectTyper(\n name=\"workspace\", help=\"Commands for interacting with Prefect Cloud Workspaces\"\n)\ncloud_app.add_typer(workspace_app, aliases=[\"workspaces\"])\napp.add_typer(cloud_app)\n\n\ndef build_url_from_workspace(workspace: Dict) -> str:\n return (\n f\"{PREFECT_CLOUD_URL.value()}\"\n f\"/accounts/{workspace['account_id']}\"\n f\"/workspaces/{workspace['workspace_id']}\"\n )\n\n\ndef confirm_logged_in():\n if not PREFECT_API_KEY:\n profile = prefect.context.get_settings_context().profile\n exit_with_error(\n f\"Currently not authenticated in profile {profile.name!r}. \"\n \"Please login with `prefect cloud login --key <API_KEY>`.\"\n )\n\n\ndef get_current_workspace(workspaces):\n workspace_handles_by_id = {\n workspace[\n \"workspace_id\"\n ]: f\"{workspace['account_handle']}/{workspace['workspace_handle']}\"\n for workspace in workspaces\n }\n current_workspace_id = re.match(\n r\".*accounts/.{36}/workspaces/(.{36})\\Z\", PREFECT_API_URL.value()\n ).groups()[0]\n return workspace_handles_by_id[current_workspace_id]\n\n\ndef build_table(selected_idx: int, workspaces: Iterable[str]) -> Table:\n \"\"\"\n Generate a table of workspaces. 
The `select_idx` of workspaces will be highlighted.\n\n Args:\n selected_idx: currently selected index\n workspaces: Iterable of strings\n\n Returns:\n rich.table.Table\n \"\"\"\n\n table = Table()\n table.add_column(\n \"[#024dfd]Select a Workspace:\", justify=\"right\", style=\"#8ea0ae\", no_wrap=True\n )\n\n for i, workspace in enumerate(sorted(workspaces)):\n if i == selected_idx:\n table.add_row(\"[#024dfd on #FFFFFF]> \" + workspace)\n else:\n table.add_row(\" \" + workspace)\n return table\n\n\ndef select_workspace(workspaces: Iterable[str]) -> str:\n \"\"\"\n Given a list of workspaces, display them to user in a Table\n and allow them to select one.\n\n Args:\n workspaces: List of workspaces to choose from\n\n Returns:\n str: the selected workspace\n \"\"\"\n\n workspaces = sorted(workspaces)\n current_idx = 0\n selected_workspace = None\n\n with Live(\n build_table(current_idx, workspaces), auto_refresh=False, console=app.console\n ) as live:\n while selected_workspace is None:\n key = readchar.readkey()\n\n if key == readchar.key.UP:\n current_idx = current_idx - 1\n # wrap to bottom if at the top\n if current_idx < 0:\n current_idx = len(workspaces) - 1\n elif key == readchar.key.DOWN:\n current_idx = current_idx + 1\n # wrap to top if at the bottom\n if current_idx >= len(workspaces):\n current_idx = 0\n elif key == readchar.key.CTRL_C:\n # gracefully exit with no message\n exit_with_error(\"\")\n elif key == readchar.key.ENTER:\n selected_workspace = workspaces[current_idx]\n\n live.update(build_table(current_idx, workspaces), refresh=True)\n\n return selected_workspace\n\n\n@cloud_app.command()\nasync def login(\n key: str = typer.Option(\n ..., \"--key\", \"-k\", help=\"API Key to authenticate with Prefect\", prompt=True\n ),\n workspace_handle: str = typer.Option(\n None,\n \"--workspace\",\n \"-w\",\n help=\"Full handle of workspace, in format '<account_handle>/<workspace_handle>'\",\n ),\n):\n \"\"\"\n Log in to Prefect Cloud.\n Creates a new profile configured to use the specified PREFECT_API_KEY.\n Uses a previously configured profile if it exists.\n \"\"\"\n profiles = load_profiles()\n\n async with get_cloud_client(api_key=key) as client:\n try:\n workspaces = await client.read_workspaces()\n workspace_handle_details = {\n f\"{workspace['account_handle']}/{workspace['workspace_handle']}\": workspace\n for workspace in workspaces\n }\n except CloudUnauthorizedError:\n if key.startswith(\"pcu\"):\n help_message = \"It looks like you're using API key from Cloud 1 (https://cloud.prefect.io). Make sure that you generate API key using Cloud 2 (https://app.prefect.cloud)\"\n elif not key.startswith(\"pnu\"):\n help_message = \"Your key is not in our expected format.\"\n else:\n help_message = \"Please ensure your credentials are correct.\"\n exit_with_error(\n f\"Unable to authenticate with Prefect Cloud. 
{help_message}\"\n )\n except httpx.HTTPStatusError as exc:\n exit_with_error(f\"Error connecting to Prefect Cloud: {exc!r}\")\n\n for profile_name in profiles:\n if key == profiles[profile_name].settings.get(PREFECT_API_KEY):\n profiles.set_active(profile_name)\n save_profiles(profiles)\n with prefect.context.use_profile(profile_name):\n current_workspace = get_current_workspace(workspaces)\n\n if workspace_handle is not None:\n if workspace_handle not in workspace_handle_details:\n exit_with_error(f\"Workspace {workspace_handle!r} not found.\")\n\n update_current_profile(\n {\n PREFECT_API_URL: build_url_from_workspace(\n workspace_handle_details[workspace_handle]\n )\n }\n )\n current_workspace = workspace_handle\n\n exit_with_success(\n f\"Logged in to Prefect Cloud using profile {profile_name!r}.\\n\"\n f\"Workspace is currently set to {current_workspace!r}. \"\n f\"The workspace can be changed using `prefect cloud workspace set`.\"\n )\n\n workspace_handle_details = {\n f\"{workspace['account_handle']}/{workspace['workspace_handle']}\": workspace\n for workspace in workspaces\n }\n\n if not workspace_handle:\n workspace_handle = select_workspace(workspace_handle_details.keys())\n\n cloud_profile_name = app.console.input(\n \"Creating a profile for this Prefect Cloud login. Please specify a profile name: \"\n )\n\n cloud_profile_name = cloud_profile_name.strip()\n if cloud_profile_name == \"\":\n exit_with_error(\"Please provide a non-empty profile name.\")\n\n if cloud_profile_name in profiles:\n exit_with_error(f\"Profile {cloud_profile_name!r} already exists.\")\n\n profiles.add_profile(\n profiles[profiles.active_name].copy(\n update={\n \"name\": cloud_profile_name,\n }\n )\n )\n\n profiles.update_profile(\n cloud_profile_name,\n {\n PREFECT_API_URL: build_url_from_workspace(\n workspace_handle_details[workspace_handle]\n ),\n PREFECT_API_KEY: key,\n },\n )\n\n profiles.set_active(cloud_profile_name)\n save_profiles(profiles)\n\n exit_with_success(\n f\"Logged in to Prefect Cloud using profile {cloud_profile_name!r}.\\n\"\n f\"Workspace is currently set to {workspace_handle!r}. \"\n f\"The workspace can be changed using `prefect cloud workspace set`.\"\n )\n\n\n@workspace_app.command()\nasync def ls():\n \"\"\"List available workspaces.\"\"\"\n\n confirm_logged_in()\n\n async with get_cloud_client() as client:\n try:\n workspaces = await client.read_workspaces()\n except CloudUnauthorizedError:\n exit_with_error(\n \"Unable to authenticate. Please ensure your credentials are correct.\"\n )\n\n workspace_handle_details = {\n f\"{workspace['account_handle']}/{workspace['workspace_handle']}\": workspace\n for workspace in workspaces\n }\n\n current_workspace = get_current_workspace(workspaces)\n\n table = Table(caption=\"* active workspace\")\n table.add_column(\n \"[#024dfd]Available Workspaces:\", justify=\"right\", style=\"#8ea0ae\", no_wrap=True\n )\n\n for i, workspace_handle in enumerate(sorted(workspace_handle_details)):\n if workspace_handle == current_workspace:\n table.add_row(f\"[green] * {workspace_handle}[/green]\")\n else:\n table.add_row(f\" {workspace_handle}\")\n app.console.print(table)\n\n\n@workspace_app.command()\nasync def set(\n workspace_handle: str = typer.Option(\n None,\n \"--workspace\",\n \"-w\",\n help=\"Full handle of workspace, in format '<account_handle>/<workspace_handle>'\",\n ),\n):\n \"\"\"Set current workspace. 
Shows a workspace picker if no workspace is specified.\"\"\"\n confirm_logged_in()\n\n async with get_cloud_client() as client:\n try:\n workspaces = await client.read_workspaces()\n except CloudUnauthorizedError:\n exit_with_error(\n \"Unable to authenticate. Please ensure your credentials are correct.\"\n )\n workspaces = {\n f\"{workspace['account_handle']}/{workspace['workspace_handle']}\": workspace\n for workspace in workspaces\n }\n\n if not workspace_handle:\n workspace_handle = select_workspace(workspaces)\n\n if workspace_handle not in workspaces:\n exit_with_error(\n f\"Workspace {workspace_handle!r} not found. \"\n \"Leave `--workspace` blank to select a workspace.\"\n )\n\n profile = update_current_profile(\n {PREFECT_API_URL: build_url_from_workspace(workspaces[workspace_handle])}\n )\n\n exit_with_success(\n f\"Successfully set workspace to {workspace_handle!r} in profile {profile.name!r}.\"\n )\n", "path": "src/prefect/cli/cloud.py"}]}
| 3,732 | 903 |
gh_patches_debug_20026
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-1063
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[niconico_dmc] Heartbeat failed with SOCKS5 `HTTPS?_PROXY` env
## Checklist
- [x] I'm reporting a broken site support
- [x] I've verified that I'm running yt-dlp version **2021.09.02**
- [x] I've checked that all provided URLs are alive and playable in a browser
- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [x] I've searched the bugtracker for similar issues including closed ones
## Verbose log
```
$ HTTP_PROXY=socks5://127.0.0.1:10080 HTTPS_PROXY=socks5://127.0.0.1:10080 yt-dlp --verbose --proxy socks5://127.0.0.1:10080 https://www.nicovideo.jp/watch/sm9
[debug] Command-line config: ['--verbose', '--proxy', 'socks5://127.0.0.1:10080', 'https://www.nicovideo.jp/watch/sm9']
[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8
[debug] yt-dlp version 2021.09.02 (source)
[debug] Python version 3.10.0rc2 (CPython 32bit) - Linux-3.10.49-armv7l-with-libc
[debug] exe versions: ffmpeg 4.4, ffprobe 4.4
[debug] Optional libraries: mutagen, pycryptodome, sqlite, websockets
[debug] Proxy map: {'http': 'socks5://127.0.0.1:10080', 'https': 'socks5://127.0.0.1:10080'}
[debug] [niconico] Extracting URL: https://www.nicovideo.jp/watch/sm9
[niconico] sm9: Downloading webpage
[niconico] sm9: Downloading video info page
[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, vcodec:vp9.2(10), acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id
[debug] Default format spec: bestvideo*+bestaudio/best
[info] sm9: Downloading 1 format(s): h264_360p-aac_128kbps
[debug] Invoking downloader on "niconico_dmc:sm9/archive_h264_360p/archive_aac_128kbps"
[niconico_dmc] Downloading from DMC
[niconico] sm9: Downloading JSON metadata for h264_360p-aac_128kbps
[niconico] sm9: Acquiring permission for downloading video
[niconico_dmc] Heartbeat with 40 second interval ...
[niconico_dmc] Heartbeat failed
[download] Destination: 新・豪血寺一族 -煩悩解放 - レッツゴー!陰陽師 [sm9].mp4
[download] 25.4% of 26.95MiB at 174.88KiB/s ETA 01:57[niconico_dmc] Heartbeat failed
[download] 39.8% of 26.95MiB at 137.12KiB/s ETA 02:01[niconico_dmc] Heartbeat failed
[download] 53.6% of 26.95MiB at 124.49KiB/s ETA 01:42[download] Got server HTTP error: Downloaded 15136650 bytes, expected 28261021 bytes. Retrying (attempt 1 of 10) ...
```
## Description
There seem to be two separate issues:
- niconico_dmc heartbeat connection does not respect the `--proxy` option, and
- it cannot connect via a SOCKS5 server.
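
For illustration, one direction a fix could take is to build the heartbeat request once and send it through YoutubeDL's own opener (which applies `--proxy`, including `socks5://` URLs) instead of calling `compat_urllib_request.urlopen` directly. The snippet below is a sketch only and assumes the surrounding names (`self`, `heartbeat_url`, `heartbeat_data`) from `NiconicoDmcFD.real_download` in `yt_dlp/downloader/niconico.py`:

```python
# Sketch only: intended as a replacement for the heartbeat() closure inside
# NiconicoDmcFD.real_download(); self.ydl.urlopen() goes through the
# YoutubeDL opener, so the configured proxy settings are honoured.
from ..utils import sanitized_Request

request = sanitized_Request(heartbeat_url, heartbeat_data)

def heartbeat():
    try:
        self.ydl.urlopen(request).read()
    except Exception:
        self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
```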
</issue>
<code>
[start of yt_dlp/downloader/niconico.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import threading
5
6 from .common import FileDownloader
7 from ..downloader import get_suitable_downloader
8 from ..extractor.niconico import NiconicoIE
9 from ..compat import compat_urllib_request
10
11
12 class NiconicoDmcFD(FileDownloader):
13 """ Downloading niconico douga from DMC with heartbeat """
14
15 FD_NAME = 'niconico_dmc'
16
17 def real_download(self, filename, info_dict):
18 self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
19
20 ie = NiconicoIE(self.ydl)
21 info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
22
23 fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)
24
25 success = download_complete = False
26 timer = [None]
27 heartbeat_lock = threading.Lock()
28 heartbeat_url = heartbeat_info_dict['url']
29 heartbeat_data = heartbeat_info_dict['data'].encode()
30 heartbeat_interval = heartbeat_info_dict.get('interval', 30)
31
32 def heartbeat():
33 try:
34 compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)
35 except Exception:
36 self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
37
38 with heartbeat_lock:
39 if not download_complete:
40 timer[0] = threading.Timer(heartbeat_interval, heartbeat)
41 timer[0].start()
42
43 heartbeat_info_dict['ping']()
44 self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
45 try:
46 heartbeat()
47 if type(fd).__name__ == 'HlsFD':
48 info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])
49 success = fd.real_download(filename, info_dict)
50 finally:
51 if heartbeat_lock:
52 with heartbeat_lock:
53 timer[0].cancel()
54 download_complete = True
55 return success
56
[end of yt_dlp/downloader/niconico.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yt_dlp/downloader/niconico.py b/yt_dlp/downloader/niconico.py
--- a/yt_dlp/downloader/niconico.py
+++ b/yt_dlp/downloader/niconico.py
@@ -6,7 +6,7 @@
from .common import FileDownloader
from ..downloader import get_suitable_downloader
from ..extractor.niconico import NiconicoIE
-from ..compat import compat_urllib_request
+from ..utils import sanitized_Request
class NiconicoDmcFD(FileDownloader):
@@ -29,9 +29,11 @@
heartbeat_data = heartbeat_info_dict['data'].encode()
heartbeat_interval = heartbeat_info_dict.get('interval', 30)
+ request = sanitized_Request(heartbeat_url, heartbeat_data)
+
def heartbeat():
try:
- compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)
+ self.ydl.urlopen(request).read()
except Exception:
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
|
{"golden_diff": "diff --git a/yt_dlp/downloader/niconico.py b/yt_dlp/downloader/niconico.py\n--- a/yt_dlp/downloader/niconico.py\n+++ b/yt_dlp/downloader/niconico.py\n@@ -6,7 +6,7 @@\n from .common import FileDownloader\n from ..downloader import get_suitable_downloader\n from ..extractor.niconico import NiconicoIE\n-from ..compat import compat_urllib_request\n+from ..utils import sanitized_Request\n \n \n class NiconicoDmcFD(FileDownloader):\n@@ -29,9 +29,11 @@\n heartbeat_data = heartbeat_info_dict['data'].encode()\n heartbeat_interval = heartbeat_info_dict.get('interval', 30)\n \n+ request = sanitized_Request(heartbeat_url, heartbeat_data)\n+\n def heartbeat():\n try:\n- compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)\n+ self.ydl.urlopen(request).read()\n except Exception:\n self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)\n", "issue": "[niconico_dmc] Heartbeat failed with SOCKS5 `HTTPS?_PROXY` env\n\r\n## Checklist\r\n\r\n- [x] I'm reporting a broken site support\r\n- [x] I've verified that I'm running yt-dlp version **2021.09.02**\r\n- [x] I've checked that all provided URLs are alive and playable in a browser\r\n- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped\r\n- [x] I've searched the bugtracker for similar issues including closed ones\r\n\r\n\r\n## Verbose log\r\n\r\n```\r\n$ HTTP_PROXY=socks5://127.0.0.1:10080 HTTPS_PROXY=socks5://127.0.0.1:10080 yt-dlp --verbose --proxy socks5://127.0.0.1:10080 https://www.nicovideo.jp/watch/sm9\r\n[debug] Command-line config: ['--verbose', '--proxy', 'socks5://127.0.0.1:10080', 'https://www.nicovideo.jp/watch/sm9']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8\r\n[debug] yt-dlp version 2021.09.02 (source)\r\n[debug] Python version 3.10.0rc2 (CPython 32bit) - Linux-3.10.49-armv7l-with-libc\r\n[debug] exe versions: ffmpeg 4.4, ffprobe 4.4\r\n[debug] Optional libraries: mutagen, pycryptodome, sqlite, websockets\r\n[debug] Proxy map: {'http': 'socks5://127.0.0.1:10080', 'https': 'socks5://127.0.0.1:10080'}\r\n[debug] [niconico] Extracting URL: https://www.nicovideo.jp/watch/sm9\r\n[niconico] sm9: Downloading webpage\r\n[niconico] sm9: Downloading video info page\r\n[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, vcodec:vp9.2(10), acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id\r\n[debug] Default format spec: bestvideo*+bestaudio/best\r\n[info] sm9: Downloading 1 format(s): h264_360p-aac_128kbps\r\n[debug] Invoking downloader on \"niconico_dmc:sm9/archive_h264_360p/archive_aac_128kbps\"\r\n[niconico_dmc] Downloading from DMC\r\n[niconico] sm9: Downloading JSON metadata for h264_360p-aac_128kbps\r\n[niconico] sm9: Acquiring permission for downloading video\r\n[niconico_dmc] Heartbeat with 40 second interval ...\r\n[niconico_dmc] Heartbeat failed\r\n[download] Destination: \u65b0\u30fb\u8c6a\u8840\u5bfa\u4e00\u65cf -\u7169\u60a9\u89e3\u653e - \u30ec\u30c3\u30c4\u30b4\u30fc\uff01\u9670\u967d\u5e2b [sm9].mp4\r\n[download] 25.4% of 26.95MiB at 174.88KiB/s ETA 01:57[niconico_dmc] Heartbeat failed\r\n[download] 39.8% of 26.95MiB at 137.12KiB/s ETA 02:01[niconico_dmc] Heartbeat failed\r\n[download] 53.6% of 26.95MiB at 124.49KiB/s ETA 01:42[download] Got server HTTP error: Downloaded 15136650 bytes, expected 28261021 bytes. 
Retrying (attempt 1 of 10) ...\r\n```\r\n\r\n\r\n## Description\r\n\r\nThere seem to be two separate issues:\r\n - niconico_dmc heartbeat connection does not respect the `--proxy` option, and\r\n - it cannot connect via a SOCKS5 server.\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport threading\n\nfrom .common import FileDownloader\nfrom ..downloader import get_suitable_downloader\nfrom ..extractor.niconico import NiconicoIE\nfrom ..compat import compat_urllib_request\n\n\nclass NiconicoDmcFD(FileDownloader):\n \"\"\" Downloading niconico douga from DMC with heartbeat \"\"\"\n\n FD_NAME = 'niconico_dmc'\n\n def real_download(self, filename, info_dict):\n self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)\n\n ie = NiconicoIE(self.ydl)\n info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)\n\n fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)\n\n success = download_complete = False\n timer = [None]\n heartbeat_lock = threading.Lock()\n heartbeat_url = heartbeat_info_dict['url']\n heartbeat_data = heartbeat_info_dict['data'].encode()\n heartbeat_interval = heartbeat_info_dict.get('interval', 30)\n\n def heartbeat():\n try:\n compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)\n except Exception:\n self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)\n\n with heartbeat_lock:\n if not download_complete:\n timer[0] = threading.Timer(heartbeat_interval, heartbeat)\n timer[0].start()\n\n heartbeat_info_dict['ping']()\n self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))\n try:\n heartbeat()\n if type(fd).__name__ == 'HlsFD':\n info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])\n success = fd.real_download(filename, info_dict)\n finally:\n if heartbeat_lock:\n with heartbeat_lock:\n timer[0].cancel()\n download_complete = True\n return success\n", "path": "yt_dlp/downloader/niconico.py"}]}
| 2,063 | 232 |
gh_patches_debug_3623
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-modules-extras-3522
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ec2_elb_facts should support check mode
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
ec2_elb_facts
##### ANSIBLE VERSION
```
ansible 2.2.0.0
config file =
configured module search path = Default w/o overrides
```
##### CONFIGURATION
*N/A*
##### OS / ENVIRONMENT
*N/A*
##### SUMMARY
Since `ec2_elb_facts` is strictly a read-only operation, it should support running with `--check`.
##### STEPS TO REPRODUCE
```sh
ansible-playbook \
-i hosts \
-l my-elb-host \
ec2_elb_facts_check.yml \
-vv \
--check
```
```yaml
- hosts: all
connection: local
gather_facts: no
tasks:
- name: Collect ELB facts
ec2_elb_facts:
names: "my-elb"
region: "us-east-1"
register: elbfacts
tags: always
```
##### EXPECTED RESULTS
It would be expected that `ec2_elb_facts` would still fetch the instance information. Skipping it instead prevents enumerating the ELB instance hosts, dynamically adding them to the inventory, and then running `--check` mode against what would *actually* be getting done.
##### ACTUAL RESULTS
```
TASK [Collect ELB facts] ***********************************************
task path: /Projects/ec2_elb_facts_check.yml:6
skipping: [my-elb-host] => {
"changed": false,
"skipped": true
}
MSG:
remote module (ec2_elb_facts) does not support check mode
```
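
For illustration, the kind of change that would presumably be enough is to declare check-mode support when constructing the module, since gathering facts never changes state. A sketch, not the exact patch:

```python
# Sketch only: the AnsibleModule constructor call in main() of
# cloud/amazon/ec2_elb_facts.py just needs to opt in to check mode;
# facts gathering is read-only, so no other changes should be required.
module = AnsibleModule(argument_spec=argument_spec,
                       supports_check_mode=True)
```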
</issue>
<code>
[start of cloud/amazon/ec2_elb_facts.py]
1 #!/usr/bin/python
2 #
3 # This is a free software: you can redistribute it and/or modify
4 # it under the terms of the GNU General Public License as published by
5 # the Free Software Foundation, either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This Ansible library is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
12 #
13 # You should have received a copy of the GNU General Public License
14 # along with this library. If not, see <http://www.gnu.org/licenses/>.
15
16 DOCUMENTATION = '''
17 ---
18 module: ec2_elb_facts
19 short_description: Gather facts about EC2 Elastic Load Balancers in AWS
20 description:
21 - Gather facts about EC2 Elastic Load Balancers in AWS
22 version_added: "2.0"
23 author:
24 - "Michael Schultz (github.com/mjschultz)"
25 - "Fernando Jose Pando (@nand0p)"
26 options:
27 names:
28 description:
29 - List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
30 required: false
31 default: null
32 aliases: ['elb_ids', 'ec2_elbs']
33 extends_documentation_fragment:
34 - aws
35 - ec2
36 '''
37
38 EXAMPLES = '''
39 # Note: These examples do not set authentication details, see the AWS Guide for details.
40 # Output format tries to match ec2_elb_lb module input parameters
41
42 # Gather facts about all ELBs
43 - action:
44 module: ec2_elb_facts
45 register: elb_facts
46
47 - action:
48 module: debug
49 msg: "{{ item.dns_name }}"
50 with_items: "{{ elb_facts.elbs }}"
51
52 # Gather facts about a particular ELB
53 - action:
54 module: ec2_elb_facts
55 names: frontend-prod-elb
56 register: elb_facts
57
58 - action:
59 module: debug
60 msg: "{{ elb_facts.elbs.0.dns_name }}"
61
62 # Gather facts about a set of ELBs
63 - action:
64 module: ec2_elb_facts
65 names:
66 - frontend-prod-elb
67 - backend-prod-elb
68 register: elb_facts
69
70 - action:
71 module: debug
72 msg: "{{ item.dns_name }}"
73 with_items: "{{ elb_facts.elbs }}"
74
75 '''
76
77 try:
78 import boto.ec2.elb
79 from boto.ec2.tag import Tag
80 from boto.exception import BotoServerError
81 HAS_BOTO = True
82 except ImportError:
83 HAS_BOTO = False
84
85 class ElbInformation(object):
86 """ Handles ELB information """
87
88 def __init__(self,
89 module,
90 names,
91 region,
92 **aws_connect_params):
93
94 self.module = module
95 self.names = names
96 self.region = region
97 self.aws_connect_params = aws_connect_params
98 self.connection = self._get_elb_connection()
99
100 def _get_tags(self, elbname):
101 params = {'LoadBalancerNames.member.1': elbname}
102 try:
103 elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
104 return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
105 except:
106 return {}
107
108 def _get_elb_connection(self):
109 try:
110 return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
111 except BotoServerError as err:
112 self.module.fail_json(msg=err.message)
113
114 def _get_elb_listeners(self, listeners):
115 listener_list = []
116
117 for listener in listeners:
118 listener_dict = {
119 'load_balancer_port': listener[0],
120 'instance_port': listener[1],
121 'protocol': listener[2],
122 }
123
124 try:
125 ssl_certificate_id = listener[4]
126 except IndexError:
127 pass
128 else:
129 if ssl_certificate_id:
130 listener_dict['ssl_certificate_id'] = ssl_certificate_id
131
132 listener_list.append(listener_dict)
133
134 return listener_list
135
136 def _get_health_check(self, health_check):
137 protocol, port_path = health_check.target.split(':')
138 try:
139 port, path = port_path.split('/', 1)
140 path = '/{}'.format(path)
141 except ValueError:
142 port = port_path
143 path = None
144
145 health_check_dict = {
146 'ping_protocol': protocol.lower(),
147 'ping_port': int(port),
148 'response_timeout': health_check.timeout,
149 'interval': health_check.interval,
150 'unhealthy_threshold': health_check.unhealthy_threshold,
151 'healthy_threshold': health_check.healthy_threshold,
152 }
153
154 if path:
155 health_check_dict['ping_path'] = path
156 return health_check_dict
157
158 def _get_elb_info(self, elb):
159 elb_info = {
160 'name': elb.name,
161 'zones': elb.availability_zones,
162 'dns_name': elb.dns_name,
163 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
164 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
165 'hosted_zone_name': elb.canonical_hosted_zone_name,
166 'hosted_zone_id': elb.canonical_hosted_zone_name_id,
167 'instances': [instance.id for instance in elb.instances],
168 'listeners': self._get_elb_listeners(elb.listeners),
169 'scheme': elb.scheme,
170 'security_groups': elb.security_groups,
171 'health_check': self._get_health_check(elb.health_check),
172 'subnets': elb.subnets,
173 'instances_inservice': [],
174 'instances_inservice_count': 0,
175 'instances_outofservice': [],
176 'instances_outofservice_count': 0,
177 'instances_inservice_percent': 0.0,
178 'tags': self._get_tags(elb.name)
179 }
180
181 if elb.vpc_id:
182 elb_info['vpc_id'] = elb.vpc_id
183
184 if elb.instances:
185 try:
186 instance_health = self.connection.describe_instance_health(elb.name)
187 except BotoServerError as err:
188 self.module.fail_json(msg=err.message)
189 elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
190 elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
191 elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
192 elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
193 elb_info['instances_inservice_percent'] = float(elb_info['instances_inservice_count'])/(
194 float(elb_info['instances_inservice_count']) +
195 float(elb_info['instances_outofservice_count']))*100
196 return elb_info
197
198
199 def list_elbs(self):
200 elb_array = []
201
202 try:
203 all_elbs = self.connection.get_all_load_balancers()
204 except BotoServerError as err:
205 self.module.fail_json(msg = "%s: %s" % (err.error_code, err.error_message))
206
207 if all_elbs:
208 if self.names:
209 for existing_lb in all_elbs:
210 if existing_lb.name in self.names:
211 elb_array.append(existing_lb)
212 else:
213 elb_array = all_elbs
214
215 return list(map(self._get_elb_info, elb_array))
216
217 def main():
218 argument_spec = ec2_argument_spec()
219 argument_spec.update(dict(
220 names={'default': [], 'type': 'list'}
221 )
222 )
223 module = AnsibleModule(argument_spec=argument_spec)
224
225 if not HAS_BOTO:
226 module.fail_json(msg='boto required for this module')
227
228 region, ec2_url, aws_connect_params = get_aws_connection_info(module)
229
230 if not region:
231 module.fail_json(msg="region must be specified")
232
233 names = module.params['names']
234 elb_information = ElbInformation(module,
235 names,
236 region,
237 **aws_connect_params)
238
239 ec2_facts_result = dict(changed=False,
240 elbs=elb_information.list_elbs())
241
242 module.exit_json(**ec2_facts_result)
243
244 from ansible.module_utils.basic import *
245 from ansible.module_utils.ec2 import *
246
247 if __name__ == '__main__':
248 main()
249
[end of cloud/amazon/ec2_elb_facts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cloud/amazon/ec2_elb_facts.py b/cloud/amazon/ec2_elb_facts.py
--- a/cloud/amazon/ec2_elb_facts.py
+++ b/cloud/amazon/ec2_elb_facts.py
@@ -220,7 +220,8 @@
names={'default': [], 'type': 'list'}
)
)
- module = AnsibleModule(argument_spec=argument_spec)
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
|
{"golden_diff": "diff --git a/cloud/amazon/ec2_elb_facts.py b/cloud/amazon/ec2_elb_facts.py\n--- a/cloud/amazon/ec2_elb_facts.py\n+++ b/cloud/amazon/ec2_elb_facts.py\n@@ -220,7 +220,8 @@\n names={'default': [], 'type': 'list'}\n )\n )\n- module = AnsibleModule(argument_spec=argument_spec)\n+ module = AnsibleModule(argument_spec=argument_spec,\n+ supports_check_mode=True)\n \n if not HAS_BOTO:\n module.fail_json(msg='boto required for this module')\n", "issue": "ec2_elb_facts should support check mode\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\nec2_elb_facts\r\n\r\n##### ANSIBLE VERSION\r\n\r\n```\r\nansible 2.2.0.0\r\n config file = \r\n configured module search path = Default w/o overrides\r\n```\r\n\r\n##### CONFIGURATION\r\n*N/A*\r\n\r\n##### OS / ENVIRONMENT\r\n*N/A*\r\n\r\n##### SUMMARY\r\nSince the `ec2_elb_facts` is strictly a read-only operation, it should support running with `--check`\r\n\r\n##### STEPS TO REPRODUCE\r\n\r\n```sh\r\nansible-playbook \\\r\n -i hosts \\\r\n -l my-elb-host \\\r\n ec2_elb_facts_check.yml \\\r\n -vv \\\r\n --check\r\n```\r\n\r\n```yaml\r\n- hosts: all\r\n connection: local\r\n gather_facts: no\r\n\r\n tasks:\r\n - name: Collect ELB facts\r\n ec2_elb_facts:\r\n names: \"my-elb\"\r\n region: \"us-east-1\"\r\n register: elbfacts\r\n tags: always\r\n```\r\n\r\n##### EXPECTED RESULTS\r\nIt would be expected that `ec2_elb_facts` would still fetch the instance information. This being omitted, prevents the ability to enumerate ELB instance hosts, dynamically add them to the inventory, and then conduct `--check` mode against what would *actually* be getting done.\r\n\r\n##### ACTUAL RESULTS\r\n\r\n```\r\nTASK [Collect ELB facts] ***********************************************\r\ntask path: /Projects/ec2_elb_facts_check.yml:6\r\nskipping: [my-elb-host] => {\r\n \"changed\": false, \r\n \"skipped\": true\r\n}\r\n\r\nMSG:\r\n\r\nremote module (ec2_elb_facts) does not support check mode\r\n```\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This is a free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This Ansible library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this library. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: ec2_elb_facts\nshort_description: Gather facts about EC2 Elastic Load Balancers in AWS\ndescription:\n - Gather facts about EC2 Elastic Load Balancers in AWS\nversion_added: \"2.0\"\nauthor:\n - \"Michael Schultz (github.com/mjschultz)\"\n - \"Fernando Jose Pando (@nand0p)\"\noptions:\n names:\n description:\n - List of ELB names to gather facts about. 
Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.\n required: false\n default: null\n aliases: ['elb_ids', 'ec2_elbs']\nextends_documentation_fragment:\n - aws\n - ec2\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n# Output format tries to match ec2_elb_lb module input parameters\n\n# Gather facts about all ELBs\n- action:\n module: ec2_elb_facts\n register: elb_facts\n\n- action:\n module: debug\n msg: \"{{ item.dns_name }}\"\n with_items: \"{{ elb_facts.elbs }}\"\n\n# Gather facts about a particular ELB\n- action:\n module: ec2_elb_facts\n names: frontend-prod-elb\n register: elb_facts\n\n- action:\n module: debug\n msg: \"{{ elb_facts.elbs.0.dns_name }}\"\n\n# Gather facts about a set of ELBs\n- action:\n module: ec2_elb_facts\n names:\n - frontend-prod-elb\n - backend-prod-elb\n register: elb_facts\n\n- action:\n module: debug\n msg: \"{{ item.dns_name }}\"\n with_items: \"{{ elb_facts.elbs }}\"\n\n'''\n\ntry:\n import boto.ec2.elb\n from boto.ec2.tag import Tag\n from boto.exception import BotoServerError\n HAS_BOTO = True\nexcept ImportError:\n HAS_BOTO = False\n\nclass ElbInformation(object):\n \"\"\" Handles ELB information \"\"\"\n\n def __init__(self,\n module,\n names,\n region,\n **aws_connect_params):\n\n self.module = module\n self.names = names\n self.region = region\n self.aws_connect_params = aws_connect_params\n self.connection = self._get_elb_connection()\n\n def _get_tags(self, elbname):\n params = {'LoadBalancerNames.member.1': elbname}\n try:\n elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])\n return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))\n except:\n return {}\n\n def _get_elb_connection(self):\n try:\n return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)\n except BotoServerError as err:\n self.module.fail_json(msg=err.message)\n\n def _get_elb_listeners(self, listeners):\n listener_list = []\n\n for listener in listeners:\n listener_dict = {\n 'load_balancer_port': listener[0],\n 'instance_port': listener[1],\n 'protocol': listener[2],\n }\n\n try:\n ssl_certificate_id = listener[4]\n except IndexError:\n pass\n else:\n if ssl_certificate_id:\n listener_dict['ssl_certificate_id'] = ssl_certificate_id\n\n listener_list.append(listener_dict)\n\n return listener_list\n\n def _get_health_check(self, health_check):\n protocol, port_path = health_check.target.split(':')\n try:\n port, path = port_path.split('/', 1)\n path = '/{}'.format(path)\n except ValueError:\n port = port_path\n path = None\n\n health_check_dict = {\n 'ping_protocol': protocol.lower(),\n 'ping_port': int(port),\n 'response_timeout': health_check.timeout,\n 'interval': health_check.interval,\n 'unhealthy_threshold': health_check.unhealthy_threshold,\n 'healthy_threshold': health_check.healthy_threshold,\n }\n\n if path:\n health_check_dict['ping_path'] = path\n return health_check_dict\n\n def _get_elb_info(self, elb):\n elb_info = {\n 'name': elb.name,\n 'zones': elb.availability_zones,\n 'dns_name': elb.dns_name,\n 'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,\n 'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,\n 'hosted_zone_name': elb.canonical_hosted_zone_name,\n 'hosted_zone_id': elb.canonical_hosted_zone_name_id,\n 'instances': [instance.id for instance in elb.instances],\n 'listeners': self._get_elb_listeners(elb.listeners),\n 'scheme': elb.scheme,\n 'security_groups': 
elb.security_groups,\n 'health_check': self._get_health_check(elb.health_check),\n 'subnets': elb.subnets,\n 'instances_inservice': [],\n 'instances_inservice_count': 0,\n 'instances_outofservice': [],\n 'instances_outofservice_count': 0,\n 'instances_inservice_percent': 0.0,\n 'tags': self._get_tags(elb.name)\n }\n\n if elb.vpc_id:\n elb_info['vpc_id'] = elb.vpc_id\n\n if elb.instances:\n try:\n instance_health = self.connection.describe_instance_health(elb.name)\n except BotoServerError as err:\n self.module.fail_json(msg=err.message)\n elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']\n elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])\n elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']\n elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])\n elb_info['instances_inservice_percent'] = float(elb_info['instances_inservice_count'])/(\n float(elb_info['instances_inservice_count']) +\n float(elb_info['instances_outofservice_count']))*100\n return elb_info\n\n\n def list_elbs(self):\n elb_array = []\n\n try:\n all_elbs = self.connection.get_all_load_balancers()\n except BotoServerError as err:\n self.module.fail_json(msg = \"%s: %s\" % (err.error_code, err.error_message))\n\n if all_elbs:\n if self.names:\n for existing_lb in all_elbs:\n if existing_lb.name in self.names:\n elb_array.append(existing_lb)\n else:\n elb_array = all_elbs\n \n return list(map(self._get_elb_info, elb_array))\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n names={'default': [], 'type': 'list'}\n )\n )\n module = AnsibleModule(argument_spec=argument_spec)\n\n if not HAS_BOTO:\n module.fail_json(msg='boto required for this module')\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module)\n\n if not region:\n module.fail_json(msg=\"region must be specified\")\n\n names = module.params['names']\n elb_information = ElbInformation(module,\n names,\n region,\n **aws_connect_params)\n\n ec2_facts_result = dict(changed=False,\n elbs=elb_information.list_elbs())\n\n module.exit_json(**ec2_facts_result)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n", "path": "cloud/amazon/ec2_elb_facts.py"}]}
| 3,439 | 136 |
gh_patches_debug_14029
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-797
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Container dying after startup leads to infinite redirection loop
We are using jupyterhub with the dockerspawner module. Once successfully logged in, we observed that if the docker container is spawned but dies a second or so later (for whatever reason), the redirection from /hub/user/username to /user/username will enter an infinite redirection loop. I suspect the reason is
that there's nothing listening at /user/username (because the container server is not there), so jupyterhub redirects you to /hub/user/username as default action in that case, which in turn tries to send you back to /user/username because you are logged in.
It's an unusual condition and we are running outside the basic design, but if you have any feedback on this it would be great.
jupyterhub version 0.6.1
</issue>
<code>
[start of jupyterhub/user.py]
1 # Copyright (c) Jupyter Development Team.
2 # Distributed under the terms of the Modified BSD License.
3
4 from datetime import datetime, timedelta
5 from urllib.parse import quote, urlparse
6
7 from tornado import gen
8 from tornado.log import app_log
9
10 from sqlalchemy import inspect
11
12 from .utils import url_path_join
13
14 from . import orm
15 from traitlets import HasTraits, Any, Dict, observe, default
16 from .spawner import LocalProcessSpawner
17
18
19 class UserDict(dict):
20 """Like defaultdict, but for users
21
22 Getting by a user id OR an orm.User instance returns a User wrapper around the orm user.
23 """
24 def __init__(self, db_factory, settings):
25 self.db_factory = db_factory
26 self.settings = settings
27 super().__init__()
28
29 @property
30 def db(self):
31 return self.db_factory()
32
33 def __contains__(self, key):
34 if isinstance(key, (User, orm.User)):
35 key = key.id
36 return dict.__contains__(self, key)
37
38 def __getitem__(self, key):
39 if isinstance(key, User):
40 key = key.id
41 elif isinstance(key, str):
42 orm_user = self.db.query(orm.User).filter(orm.User.name==key).first()
43 if orm_user is None:
44 raise KeyError("No such user: %s" % key)
45 else:
46 key = orm_user
47 if isinstance(key, orm.User):
48 # users[orm_user] returns User(orm_user)
49 orm_user = key
50 if orm_user.id not in self:
51 user = self[orm_user.id] = User(orm_user, self.settings)
52 return user
53 user = dict.__getitem__(self, orm_user.id)
54 user.db = self.db
55 return user
56 elif isinstance(key, int):
57 id = key
58 if id not in self:
59 orm_user = self.db.query(orm.User).filter(orm.User.id==id).first()
60 if orm_user is None:
61 raise KeyError("No such user: %s" % id)
62 user = self[id] = User(orm_user, self.settings)
63 return dict.__getitem__(self, id)
64 else:
65 raise KeyError(repr(key))
66
67 def __delitem__(self, key):
68 user = self[key]
69 user_id = user.id
70 db = self.db
71 db.delete(user.orm_user)
72 db.commit()
73 dict.__delitem__(self, user_id)
74
75
76 class User(HasTraits):
77
78 @default('log')
79 def _log_default(self):
80 return app_log
81
82 settings = Dict()
83
84 db = Any(allow_none=True)
85 @default('db')
86 def _db_default(self):
87 if self.orm_user:
88 return inspect(self.orm_user).session
89 @observe('db')
90 def _db_changed(self, change):
91 """Changing db session reacquires ORM User object"""
92 # db session changed, re-get orm User
93 if self.orm_user:
94 id = self.orm_user.id
95 self.orm_user = change['new'].query(orm.User).filter(orm.User.id==id).first()
96 self.spawner.db = self.db
97
98 orm_user = None
99 spawner = None
100 spawn_pending = False
101 stop_pending = False
102
103 @property
104 def authenticator(self):
105 return self.settings.get('authenticator', None)
106
107 @property
108 def spawner_class(self):
109 return self.settings.get('spawner_class', LocalProcessSpawner)
110
111 def __init__(self, orm_user, settings, **kwargs):
112 self.orm_user = orm_user
113 self.settings = settings
114 super().__init__(**kwargs)
115
116 hub = self.db.query(orm.Hub).first()
117
118 self.cookie_name = '%s-%s' % (hub.server.cookie_name, quote(self.name, safe=''))
119 self.base_url = url_path_join(
120 self.settings.get('base_url', '/'), 'user', self.escaped_name)
121
122 self.spawner = self.spawner_class(
123 user=self,
124 db=self.db,
125 hub=hub,
126 authenticator=self.authenticator,
127 config=self.settings.get('config'),
128 )
129
130 # pass get/setattr to ORM user
131
132 def __getattr__(self, attr):
133 if hasattr(self.orm_user, attr):
134 return getattr(self.orm_user, attr)
135 else:
136 raise AttributeError(attr)
137
138 def __setattr__(self, attr, value):
139 if self.orm_user and hasattr(self.orm_user, attr):
140 setattr(self.orm_user, attr, value)
141 else:
142 super().__setattr__(attr, value)
143
144 def __repr__(self):
145 return repr(self.orm_user)
146
147 @property
148 def running(self):
149 """property for whether a user has a running server"""
150 if self.spawn_pending or self.stop_pending:
151 return False # server is not running if spawn or stop is still pending
152 if self.server is None:
153 return False
154 return True
155
156 @property
157 def escaped_name(self):
158 """My name, escaped for use in URLs, cookies, etc."""
159 return quote(self.name, safe='@')
160
161 @property
162 def proxy_path(self):
163 if self.settings.get('subdomain_host'):
164 return url_path_join('/' + self.domain, self.base_url)
165 else:
166 return self.base_url
167
168 @property
169 def domain(self):
170 """Get the domain for my server."""
171 # FIXME: escaped_name probably isn't escaped enough in general for a domain fragment
172 return self.escaped_name + '.' + self.settings['domain']
173
174 @property
175 def host(self):
176 """Get the *host* for my server (proto://domain[:port])"""
177 # FIXME: escaped_name probably isn't escaped enough in general for a domain fragment
178 parsed = urlparse(self.settings['subdomain_host'])
179 h = '%s://%s.%s' % (parsed.scheme, self.escaped_name, parsed.netloc)
180 return h
181
182 @property
183 def url(self):
184 """My URL
185
186 Full name.domain/path if using subdomains, otherwise just my /base/url
187 """
188 if self.settings.get('subdomain_host'):
189 return '{host}{path}'.format(
190 host=self.host,
191 path=self.base_url,
192 )
193 else:
194 return self.base_url
195
196 @gen.coroutine
197 def spawn(self, options=None):
198 """Start the user's spawner"""
199 db = self.db
200
201 self.server = orm.Server(
202 cookie_name=self.cookie_name,
203 base_url=self.base_url,
204 )
205 db.add(self.server)
206 db.commit()
207
208 api_token = self.new_api_token()
209 db.commit()
210
211 spawner = self.spawner
212 spawner.user_options = options or {}
213 # we are starting a new server, make sure it doesn't restore state
214 spawner.clear_state()
215 spawner.api_token = api_token
216
217 # trigger pre-spawn hook on authenticator
218 authenticator = self.authenticator
219 if (authenticator):
220 yield gen.maybe_future(authenticator.pre_spawn_start(self, spawner))
221
222 self.spawn_pending = True
223 # wait for spawner.start to return
224 try:
225 f = spawner.start()
226 # commit any changes in spawner.start (always commit db changes before yield)
227 db.commit()
228 ip_port = yield gen.with_timeout(timedelta(seconds=spawner.start_timeout), f)
229 if ip_port:
230 # get ip, port info from return value of start()
231 self.server.ip, self.server.port = ip_port
232 else:
233 # prior to 0.7, spawners had to store this info in user.server themselves.
234 # Handle < 0.7 behavior with a warning, assuming info was stored in db by the Spawner.
235 self.log.warning("DEPRECATION: Spawner.start should return (ip, port) in JupyterHub >= 0.7")
236 except Exception as e:
237 if isinstance(e, gen.TimeoutError):
238 self.log.warning("{user}'s server failed to start in {s} seconds, giving up".format(
239 user=self.name, s=spawner.start_timeout,
240 ))
241 e.reason = 'timeout'
242 else:
243 self.log.error("Unhandled error starting {user}'s server: {error}".format(
244 user=self.name, error=e,
245 ))
246 e.reason = 'error'
247 try:
248 yield self.stop()
249 except Exception:
250 self.log.error("Failed to cleanup {user}'s server that failed to start".format(
251 user=self.name,
252 ), exc_info=True)
253 # raise original exception
254 raise e
255 spawner.start_polling()
256
257 # store state
258 self.state = spawner.get_state()
259 self.last_activity = datetime.utcnow()
260 db.commit()
261 self.spawn_pending = False
262 try:
263 yield self.server.wait_up(http=True, timeout=spawner.http_timeout)
264 except Exception as e:
265 if isinstance(e, TimeoutError):
266 self.log.warning(
267 "{user}'s server never showed up at {url} "
268 "after {http_timeout} seconds. Giving up".format(
269 user=self.name,
270 url=self.server.url,
271 http_timeout=spawner.http_timeout,
272 )
273 )
274 e.reason = 'timeout'
275 else:
276 e.reason = 'error'
277 self.log.error("Unhandled error waiting for {user}'s server to show up at {url}: {error}".format(
278 user=self.name, url=self.server.url, error=e,
279 ))
280 try:
281 yield self.stop()
282 except Exception:
283 self.log.error("Failed to cleanup {user}'s server that failed to start".format(
284 user=self.name,
285 ), exc_info=True)
286 # raise original TimeoutError
287 raise e
288 return self
289
290 @gen.coroutine
291 def stop(self):
292 """Stop the user's spawner
293
294 and cleanup after it.
295 """
296 self.spawn_pending = False
297 spawner = self.spawner
298 self.spawner.stop_polling()
299 self.stop_pending = True
300 try:
301 api_token = self.spawner.api_token
302 status = yield spawner.poll()
303 if status is None:
304 yield self.spawner.stop()
305 spawner.clear_state()
306 self.state = spawner.get_state()
307 self.last_activity = datetime.utcnow()
308 # cleanup server entry, API token from defunct server
309 if self.server:
310 # cleanup server entry from db
311 self.db.delete(self.server)
312 orm_token = orm.APIToken.find(self.db, api_token)
313 if orm_token:
314 self.db.delete(orm_token)
315 self.server = None
316 self.db.commit()
317 finally:
318 self.stop_pending = False
319 # trigger post-spawner hook on authenticator
320 auth = spawner.authenticator
321 if auth:
322 yield gen.maybe_future(
323 auth.post_spawn_stop(self, spawner)
324 )
325
326
[end of jupyterhub/user.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/jupyterhub/user.py b/jupyterhub/user.py
--- a/jupyterhub/user.py
+++ b/jupyterhub/user.py
@@ -258,7 +258,6 @@
self.state = spawner.get_state()
self.last_activity = datetime.utcnow()
db.commit()
- self.spawn_pending = False
try:
yield self.server.wait_up(http=True, timeout=spawner.http_timeout)
except Exception as e:
@@ -285,6 +284,7 @@
), exc_info=True)
# raise original TimeoutError
raise e
+ self.spawn_pending = False
return self
@gen.coroutine
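The reordering above matters because `User.running` (shown earlier in `user.py`) returns `False` whenever `spawn_pending` is set: by clearing the flag only after `server.wait_up()` has succeeded, a container that dies moments after `spawner.start()` is never reported as a live server, so the hub keeps the user on the spawn/error path instead of bouncing between `/hub/user/<name>` and `/user/<name>`. A minimal sketch of that ordering (a simplified stand-in class, not the real ORM-backed model):

```python
# Illustrative only: mirrors the spawn_pending/running interplay from user.py.
class SketchUser:
    def __init__(self):
        self.spawn_pending = True   # set when spawn() begins
        self.server = object()      # server record exists as soon as spawn() starts

    @property
    def running(self):
        # like User.running: not running while a spawn is still pending
        if self.spawn_pending:
            return False
        return self.server is not None


user = SketchUser()
assert not user.running      # wait_up() hasn't confirmed the server yet: no redirect
user.spawn_pending = False   # with the patch, this happens only after wait_up() succeeds
assert user.running
```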
|
{"golden_diff": "diff --git a/jupyterhub/user.py b/jupyterhub/user.py\n--- a/jupyterhub/user.py\n+++ b/jupyterhub/user.py\n@@ -258,7 +258,6 @@\n self.state = spawner.get_state()\n self.last_activity = datetime.utcnow()\n db.commit()\n- self.spawn_pending = False\n try:\n yield self.server.wait_up(http=True, timeout=spawner.http_timeout)\n except Exception as e:\n@@ -285,6 +284,7 @@\n ), exc_info=True)\n # raise original TimeoutError\n raise e\n+ self.spawn_pending = False\n return self\n \n @gen.coroutine\n", "issue": "Container dying after startup leads to infinite redirection loop\nWe are using jupyterhub with the dockerspawner module. Once successfully logged in, we observed that if the docker container is spawned but dies a second or so later (for whatever reason), the redirection from /hub/user/username to /user/username will enter an infinite redirection loop. I suspect the reason is\nthat there's nothing listening at /user/username (because the container server is not there), so jupyterhub redirects you to /hub/user/username as default action in that case, which in turn tries to send you back to /user/username because you are logged in.\n\nIt's an unusual condition and we are running outside the basic design, but if you have any feedback on this it would be great.\n\njupyterhub version 0.6.1\n\n", "before_files": [{"content": "# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom datetime import datetime, timedelta\nfrom urllib.parse import quote, urlparse\n\nfrom tornado import gen\nfrom tornado.log import app_log\n\nfrom sqlalchemy import inspect\n\nfrom .utils import url_path_join\n\nfrom . import orm\nfrom traitlets import HasTraits, Any, Dict, observe, default\nfrom .spawner import LocalProcessSpawner\n\n\nclass UserDict(dict):\n \"\"\"Like defaultdict, but for users\n \n Getting by a user id OR an orm.User instance returns a User wrapper around the orm user.\n \"\"\"\n def __init__(self, db_factory, settings):\n self.db_factory = db_factory\n self.settings = settings\n super().__init__()\n \n @property\n def db(self):\n return self.db_factory()\n \n def __contains__(self, key):\n if isinstance(key, (User, orm.User)):\n key = key.id\n return dict.__contains__(self, key)\n \n def __getitem__(self, key):\n if isinstance(key, User):\n key = key.id\n elif isinstance(key, str):\n orm_user = self.db.query(orm.User).filter(orm.User.name==key).first()\n if orm_user is None:\n raise KeyError(\"No such user: %s\" % key)\n else:\n key = orm_user\n if isinstance(key, orm.User):\n # users[orm_user] returns User(orm_user)\n orm_user = key\n if orm_user.id not in self:\n user = self[orm_user.id] = User(orm_user, self.settings)\n return user\n user = dict.__getitem__(self, orm_user.id)\n user.db = self.db\n return user\n elif isinstance(key, int):\n id = key\n if id not in self:\n orm_user = self.db.query(orm.User).filter(orm.User.id==id).first()\n if orm_user is None:\n raise KeyError(\"No such user: %s\" % id)\n user = self[id] = User(orm_user, self.settings)\n return dict.__getitem__(self, id)\n else:\n raise KeyError(repr(key))\n \n def __delitem__(self, key):\n user = self[key]\n user_id = user.id\n db = self.db\n db.delete(user.orm_user)\n db.commit()\n dict.__delitem__(self, user_id)\n\n\nclass User(HasTraits):\n \n @default('log')\n def _log_default(self):\n return app_log\n \n settings = Dict()\n \n db = Any(allow_none=True)\n @default('db')\n def _db_default(self):\n if self.orm_user:\n return inspect(self.orm_user).session\n 
@observe('db')\n def _db_changed(self, change):\n \"\"\"Changing db session reacquires ORM User object\"\"\"\n # db session changed, re-get orm User\n if self.orm_user:\n id = self.orm_user.id\n self.orm_user = change['new'].query(orm.User).filter(orm.User.id==id).first()\n self.spawner.db = self.db\n \n orm_user = None\n spawner = None\n spawn_pending = False\n stop_pending = False\n \n @property\n def authenticator(self):\n return self.settings.get('authenticator', None)\n \n @property\n def spawner_class(self):\n return self.settings.get('spawner_class', LocalProcessSpawner)\n \n def __init__(self, orm_user, settings, **kwargs):\n self.orm_user = orm_user\n self.settings = settings\n super().__init__(**kwargs)\n \n hub = self.db.query(orm.Hub).first()\n \n self.cookie_name = '%s-%s' % (hub.server.cookie_name, quote(self.name, safe=''))\n self.base_url = url_path_join(\n self.settings.get('base_url', '/'), 'user', self.escaped_name)\n \n self.spawner = self.spawner_class(\n user=self,\n db=self.db,\n hub=hub,\n authenticator=self.authenticator,\n config=self.settings.get('config'),\n )\n \n # pass get/setattr to ORM user\n \n def __getattr__(self, attr):\n if hasattr(self.orm_user, attr):\n return getattr(self.orm_user, attr)\n else:\n raise AttributeError(attr)\n \n def __setattr__(self, attr, value):\n if self.orm_user and hasattr(self.orm_user, attr):\n setattr(self.orm_user, attr, value)\n else:\n super().__setattr__(attr, value)\n \n def __repr__(self):\n return repr(self.orm_user)\n \n @property\n def running(self):\n \"\"\"property for whether a user has a running server\"\"\"\n if self.spawn_pending or self.stop_pending:\n return False # server is not running if spawn or stop is still pending\n if self.server is None:\n return False\n return True\n \n @property\n def escaped_name(self):\n \"\"\"My name, escaped for use in URLs, cookies, etc.\"\"\"\n return quote(self.name, safe='@')\n \n @property\n def proxy_path(self):\n if self.settings.get('subdomain_host'):\n return url_path_join('/' + self.domain, self.base_url)\n else:\n return self.base_url\n \n @property\n def domain(self):\n \"\"\"Get the domain for my server.\"\"\"\n # FIXME: escaped_name probably isn't escaped enough in general for a domain fragment\n return self.escaped_name + '.' 
+ self.settings['domain']\n \n @property\n def host(self):\n \"\"\"Get the *host* for my server (proto://domain[:port])\"\"\"\n # FIXME: escaped_name probably isn't escaped enough in general for a domain fragment\n parsed = urlparse(self.settings['subdomain_host'])\n h = '%s://%s.%s' % (parsed.scheme, self.escaped_name, parsed.netloc)\n return h\n \n @property\n def url(self):\n \"\"\"My URL\n \n Full name.domain/path if using subdomains, otherwise just my /base/url\n \"\"\"\n if self.settings.get('subdomain_host'):\n return '{host}{path}'.format(\n host=self.host,\n path=self.base_url,\n )\n else:\n return self.base_url\n \n @gen.coroutine\n def spawn(self, options=None):\n \"\"\"Start the user's spawner\"\"\"\n db = self.db\n \n self.server = orm.Server(\n cookie_name=self.cookie_name,\n base_url=self.base_url,\n )\n db.add(self.server)\n db.commit()\n \n api_token = self.new_api_token()\n db.commit()\n \n spawner = self.spawner\n spawner.user_options = options or {}\n # we are starting a new server, make sure it doesn't restore state\n spawner.clear_state()\n spawner.api_token = api_token\n\n # trigger pre-spawn hook on authenticator\n authenticator = self.authenticator\n if (authenticator):\n yield gen.maybe_future(authenticator.pre_spawn_start(self, spawner))\n\n self.spawn_pending = True\n # wait for spawner.start to return\n try:\n f = spawner.start()\n # commit any changes in spawner.start (always commit db changes before yield)\n db.commit()\n ip_port = yield gen.with_timeout(timedelta(seconds=spawner.start_timeout), f)\n if ip_port:\n # get ip, port info from return value of start()\n self.server.ip, self.server.port = ip_port\n else:\n # prior to 0.7, spawners had to store this info in user.server themselves.\n # Handle < 0.7 behavior with a warning, assuming info was stored in db by the Spawner.\n self.log.warning(\"DEPRECATION: Spawner.start should return (ip, port) in JupyterHub >= 0.7\")\n except Exception as e:\n if isinstance(e, gen.TimeoutError):\n self.log.warning(\"{user}'s server failed to start in {s} seconds, giving up\".format(\n user=self.name, s=spawner.start_timeout,\n ))\n e.reason = 'timeout'\n else:\n self.log.error(\"Unhandled error starting {user}'s server: {error}\".format(\n user=self.name, error=e,\n ))\n e.reason = 'error'\n try:\n yield self.stop()\n except Exception:\n self.log.error(\"Failed to cleanup {user}'s server that failed to start\".format(\n user=self.name,\n ), exc_info=True)\n # raise original exception\n raise e\n spawner.start_polling()\n\n # store state\n self.state = spawner.get_state()\n self.last_activity = datetime.utcnow()\n db.commit()\n self.spawn_pending = False\n try:\n yield self.server.wait_up(http=True, timeout=spawner.http_timeout)\n except Exception as e:\n if isinstance(e, TimeoutError):\n self.log.warning(\n \"{user}'s server never showed up at {url} \"\n \"after {http_timeout} seconds. 
Giving up\".format(\n user=self.name,\n url=self.server.url,\n http_timeout=spawner.http_timeout,\n )\n )\n e.reason = 'timeout'\n else:\n e.reason = 'error'\n self.log.error(\"Unhandled error waiting for {user}'s server to show up at {url}: {error}\".format(\n user=self.name, url=self.server.url, error=e,\n ))\n try:\n yield self.stop()\n except Exception:\n self.log.error(\"Failed to cleanup {user}'s server that failed to start\".format(\n user=self.name,\n ), exc_info=True)\n # raise original TimeoutError\n raise e\n return self\n\n @gen.coroutine\n def stop(self):\n \"\"\"Stop the user's spawner\n \n and cleanup after it.\n \"\"\"\n self.spawn_pending = False\n spawner = self.spawner\n self.spawner.stop_polling()\n self.stop_pending = True\n try:\n api_token = self.spawner.api_token\n status = yield spawner.poll()\n if status is None:\n yield self.spawner.stop()\n spawner.clear_state()\n self.state = spawner.get_state()\n self.last_activity = datetime.utcnow()\n # cleanup server entry, API token from defunct server\n if self.server:\n # cleanup server entry from db\n self.db.delete(self.server)\n orm_token = orm.APIToken.find(self.db, api_token)\n if orm_token:\n self.db.delete(orm_token)\n self.server = None\n self.db.commit()\n finally:\n self.stop_pending = False\n # trigger post-spawner hook on authenticator\n auth = spawner.authenticator\n if auth:\n yield gen.maybe_future(\n auth.post_spawn_stop(self, spawner)\n )\n\n", "path": "jupyterhub/user.py"}]}
| 3,935 | 149 |
gh_patches_debug_20623
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-3068
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Expand/improve document part of event endpoint
<!--
Please add the appropriate label for what change should be made:
docs: changes to the documentation)
refactor: refactoring production code, eg. renaming a variable or rewriting a function
test: adding missing tests, refactoring tests; no production code change
chore: updating poetry etc; no production code change
-->
### Describe the change
Change the current URL, or add a URL that links to the actual document (that is, one that ends with `/[name].pdf`).
### Motivation
It's very nice for the app to be able to just fetch files directly.
Also, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.
### Current implementation
There is a URL that leads to the file, but only after a redirect to `cdn`.
</issue>
<code>
[start of website/documents/api/v2/serializers/document.py]
1 from rest_framework.fields import SerializerMethodField
2 from rest_framework.reverse import reverse
3
4 from documents.models import Document
5 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
6 CleanedModelSerializer,
7 )
8
9
10 class DocumentSerializer(CleanedModelSerializer):
11 class Meta:
12 model = Document
13 fields = ("pk", "name", "url", "category", "members_only")
14
15 url = SerializerMethodField("_url")
16
17 def _url(self, instance):
18 return self.context["request"].build_absolute_uri(
19 reverse("documents:document", kwargs={"pk": instance.pk})
20 )
21
[end of website/documents/api/v2/serializers/document.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py
--- a/website/documents/api/v2/serializers/document.py
+++ b/website/documents/api/v2/serializers/document.py
@@ -1,10 +1,10 @@
from rest_framework.fields import SerializerMethodField
-from rest_framework.reverse import reverse
from documents.models import Document
from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
CleanedModelSerializer,
)
+from utils.media.services import get_media_url
class DocumentSerializer(CleanedModelSerializer):
@@ -15,6 +15,10 @@
url = SerializerMethodField("_url")
def _url(self, instance):
- return self.context["request"].build_absolute_uri(
- reverse("documents:document", kwargs={"pk": instance.pk})
- )
+ if instance.members_only and (
+ not self.request.user.is_authenticated
+ or not self.request.member.has_active_membership()
+ ):
+ return self.request.build_absolute_uri(instance.get_absolute_url())
+
+ return get_media_url(instance.file, absolute_url=True)
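With this change a public document serializes straight to its media file URL, while a members-only document still falls back to the redirecting `documents:document` view for requests without an active membership. A rough sketch of the resulting payloads (field values, URL shapes, and hostnames are made up for illustration):

```python
# Hypothetical serializer output after the patch; values are illustrative only.
public_doc = {
    "pk": 1,
    "name": "Annual report",
    "url": "https://cdn.example.com/documents/annual-report.pdf",  # direct file link
    "category": "misc",
    "members_only": False,
}
restricted_doc_for_anonymous_user = {
    "pk": 2,
    "name": "Board minutes",
    "url": "https://example.com/documents/document/2/",  # still the redirect view
    "category": "minutes",
    "members_only": True,
}
```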
|
{"golden_diff": "diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py\n--- a/website/documents/api/v2/serializers/document.py\n+++ b/website/documents/api/v2/serializers/document.py\n@@ -1,10 +1,10 @@\n from rest_framework.fields import SerializerMethodField\n-from rest_framework.reverse import reverse\n \n from documents.models import Document\n from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n )\n+from utils.media.services import get_media_url\n \n \n class DocumentSerializer(CleanedModelSerializer):\n@@ -15,6 +15,10 @@\n url = SerializerMethodField(\"_url\")\n \n def _url(self, instance):\n- return self.context[\"request\"].build_absolute_uri(\n- reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n- )\n+ if instance.members_only and (\n+ not self.request.user.is_authenticated\n+ or not self.request.member.has_active_membership()\n+ ):\n+ return self.request.build_absolute_uri(instance.get_absolute_url())\n+\n+ return get_media_url(instance.file, absolute_url=True)\n", "issue": "Expand/improve document part of event endpoint\n<!--\r\n\r\nPlease add the appropriate label for what change should be made:\r\ndocs: changes to the documentation)\r\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\r\ntest: adding missing tests, refactoring tests; no production code change\r\nchore: updating poetry etc; no production code change\r\n\r\n-->\r\n\r\n### Describe the change\r\nChange the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`).\r\n\r\n### Motivation\r\nIt's very nice for the app to be able to just fetch files directly.\r\nAlso, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.\r\n\r\n### Current implementation\r\nThere is a URL that leads to the file, but only after a redirect to `cdn`.\n", "before_files": [{"content": "from rest_framework.fields import SerializerMethodField\nfrom rest_framework.reverse import reverse\n\nfrom documents.models import Document\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass DocumentSerializer(CleanedModelSerializer):\n class Meta:\n model = Document\n fields = (\"pk\", \"name\", \"url\", \"category\", \"members_only\")\n\n url = SerializerMethodField(\"_url\")\n\n def _url(self, instance):\n return self.context[\"request\"].build_absolute_uri(\n reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n )\n", "path": "website/documents/api/v2/serializers/document.py"}]}
| 889 | 251 |
gh_patches_debug_51282
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-636
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a standard way to "reset" a Configuration object for testing
It is a common occurrence in tests that the global `Configuration` object needs to be "reset" between tests. This means that its attributes need to be set back to their original values. Since the `Configuration` object is immutable by design, some additional, non-production available mechanism is needed to perform this action.
The need for this feature was mentioned in a [conversation](https://github.com/open-telemetry/opentelemetry-python/pull/630#discussion_r418343720) in #630.
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # FIXME find a better way to avoid all those "Expression has type "Any"" errors
16 # type: ignore
17
18 """
19 Simple configuration manager
20
21 This is a configuration manager for OpenTelemetry. It reads configuration
22 values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
23 characters are only alphanumeric characters and unserscores, except for the
24 first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
25
26 For example, these environment variables will be read:
27
28 1. ``OPENTELEMETRY_PYTHON_SOMETHING``
29 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
30 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
31 4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
32 4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
33
34 These won't:
35
36 1. ``OPENTELEMETRY_PYTH_SOMETHING``
37 2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
38 3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
39
40 The values stored in the environment variables can be found in an instance of
41 ``opentelemetry.configuration.Configuration``. This class can be instantiated
42 freely because instantiating it returns always the same object.
43
44 For example, if the environment variable
45 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
46 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
47
48 Non defined attributes will always return ``None``. This is intended to make it
49 easier to use the ``Configuration`` object in actual code, because it won't be
50 necessary to check for the attribute to be defined first.
51
52 Environment variables used by OpenTelemetry
53 -------------------------------------------
54
55 1. OPENTELEMETRY_PYTHON_METER_PROVIDER
56 2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
57
58 The value of these environment variables should be the name of the entry point
59 that points to the class that implements either provider. This OpenTelemetry
60 API package provides one entry point for each, which can be found in the
61 setup.py file::
62
63 entry_points={
64 ...
65 "opentelemetry_meter_provider": [
66 "default_meter_provider = "
67 "opentelemetry.metrics:DefaultMeterProvider"
68 ],
69 "opentelemetry_tracer_provider": [
70 "default_tracer_provider = "
71 "opentelemetry.trace:DefaultTracerProvider"
72 ],
73 }
74
75 To use the meter provider above, then the
76 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
77 "default_meter_provider" (this is not actually necessary since the
78 OpenTelemetry API provided providers are the default ones used if no
79 configuration is found in the environment variables).
80 """
81
82 from os import environ
83 from re import fullmatch
84
85
86 class Configuration:
87 _instance = None
88
89 __slots__ = []
90
91 def __new__(cls) -> "Configuration":
92 if Configuration._instance is None:
93
94 for key, value in environ.items():
95
96 match = fullmatch(
97 r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
98 )
99
100 if match is not None:
101
102 key = match.group(1)
103
104 setattr(Configuration, "_{}".format(key), value)
105 setattr(
106 Configuration,
107 key,
108 property(
109 fget=lambda cls, key=key: getattr(
110 cls, "_{}".format(key)
111 )
112 ),
113 )
114
115 Configuration.__slots__.append(key)
116
117 Configuration.__slots__ = tuple(Configuration.__slots__)
118
119 Configuration._instance = object.__new__(cls)
120
121 return cls._instance
122
123 def __getattr__(self, name):
124 return None
125
[end of opentelemetry-api/src/opentelemetry/configuration/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -122,3 +122,20 @@
def __getattr__(self, name):
return None
+
+ @classmethod
+ def _reset(cls):
+ """
+ This method "resets" the global configuration attributes
+
+ It is not intended to be used by production code but by testing code
+ only.
+ """
+
+ for slot in cls.__slots__:
+ if slot in cls.__dict__.keys():
+ delattr(cls, slot)
+ delattr(cls, "_{}".format(slot))
+
+ cls.__slots__ = []
+ cls._instance = None
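`_reset` gives test suites a sanctioned way to drop the cached singleton and the properties it created from the environment. A sketch of the intended usage in a pytest-style test (the test body is illustrative, not taken from the repository):

```python
from opentelemetry.configuration import Configuration


def test_meter_provider_from_env(monkeypatch):
    monkeypatch.setenv("OPENTELEMETRY_PYTHON_METER_PROVIDER", "my_meter_provider")
    try:
        assert Configuration().meter_provider == "my_meter_provider"
    finally:
        # Drop the attributes captured from the environment so later tests
        # see a fresh Configuration object instead of this test's values.
        Configuration._reset()
```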
|
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -122,3 +122,20 @@\n \n def __getattr__(self, name):\n return None\n+\n+ @classmethod\n+ def _reset(cls):\n+ \"\"\"\n+ This method \"resets\" the global configuration attributes\n+\n+ It is not intended to be used by production code but by testing code\n+ only.\n+ \"\"\"\n+\n+ for slot in cls.__slots__:\n+ if slot in cls.__dict__.keys():\n+ delattr(cls, slot)\n+ delattr(cls, \"_{}\".format(slot))\n+\n+ cls.__slots__ = []\n+ cls._instance = None\n", "issue": "Add a standard way to \"reset\" a Configuration object for testing\nIt is a common occurrence in tests that the global `Configuration` object needs to be \"reset\" between tests. This means that its attributes need to be set back to their original values. Since the `Configuration` object is immutable by design, some additional, non-production available mechanism is needed to perform this action.\r\n\r\nThe need for this feature was mentioned in a [conversation](https://github.com/open-telemetry/opentelemetry-python/pull/630#discussion_r418343720) in #630.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. 
This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n\"default_meter_provider\" (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}]}
| 1,916 | 205 |
gh_patches_debug_3505
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-1832
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Localization of duplicate email account doesn't make sense
I saw this weird thing in post production when I was creating a customer account that was already registered.


---
Edit: note, the untranslated string is ` User with this Email already exists. `
---
I'm not quite sure how to explain this: the message makes sense in English but not in other languages. A French or Polish speaker can still understand it, but it shouldn't read that way; to a developer it looks like a debug string rather than a proper user-facing message.
I will wait to hear back from you. I also know the string doesn't come from your side, so I'm not quite sure what you can do about this.
The error is generated by `django.db.models.base.Model#unique_error_message` (sourcing from `django.forms.models.BaseModelForm#_post_clean`).
```python
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': capfirst(opts.verbose_name),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = capfirst(field.verbose_name)
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = get_text_list(field_labels, _('and'))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
# ^^^^^^
```
</issue>
<code>
[start of saleor/account/forms.py]
1 from django import forms
2 from django.conf import settings
3 from django.contrib.auth import forms as django_forms, update_session_auth_hash
4 from django.utils.translation import pgettext, pgettext_lazy
5 from phonenumbers.phonenumberutil import country_code_for_region
6
7 from ..account.models import User
8 from .i18n import AddressMetaForm, get_address_form_class
9 from . import emails
10
11
12 def get_address_form(
13 data, country_code, initial=None, instance=None, **kwargs):
14 country_form = AddressMetaForm(data, initial=initial)
15 preview = False
16 if country_form.is_valid():
17 country_code = country_form.cleaned_data['country']
18 preview = country_form.cleaned_data['preview']
19
20 if initial is None and country_code:
21 initial = {}
22 if country_code:
23 initial['phone'] = '+{}'.format(country_code_for_region(country_code))
24
25 address_form_class = get_address_form_class(country_code)
26
27 if not preview and instance is not None:
28 address_form_class = get_address_form_class(
29 instance.country.code)
30 address_form = address_form_class(
31 data, instance=instance, **kwargs)
32 else:
33 initial_address = (
34 initial if not preview
35 else data.dict() if data is not None else data)
36 address_form = address_form_class(
37 not preview and data or None,
38 initial=initial_address,
39 **kwargs)
40 return address_form, preview
41
42
43 class ChangePasswordForm(django_forms.PasswordChangeForm):
44 def __init__(self, *args, **kwargs):
45 super().__init__(*args, **kwargs)
46 self.fields['new_password1'].user = self.user
47 self.fields['old_password'].widget.attrs['placeholder'] = ''
48 self.fields['new_password1'].widget.attrs['placeholder'] = ''
49 del self.fields['new_password2']
50
51
52 def logout_on_password_change(request, user):
53 if (update_session_auth_hash is not None and
54 not settings.LOGOUT_ON_PASSWORD_CHANGE):
55 update_session_auth_hash(request, user)
56
57
58 class LoginForm(django_forms.AuthenticationForm):
59 username = forms.EmailField(
60 label=pgettext('Form field', 'Email'), max_length=75)
61
62 def __init__(self, request=None, *args, **kwargs):
63 super().__init__(request=request, *args, **kwargs)
64 if request:
65 email = request.GET.get('email')
66 if email:
67 self.fields['username'].initial = email
68
69
70 class SignupForm(forms.ModelForm):
71 password = forms.CharField(
72 widget=forms.PasswordInput)
73
74 class Meta:
75 model = User
76 fields = ('email',)
77 labels = {
78 'email': pgettext_lazy(
79 'Email', 'Email'),
80 'password': pgettext_lazy(
81 'Password', 'Password')}
82
83 def __init__(self, *args, **kwargs):
84 super().__init__(*args, **kwargs)
85 if self._meta.model.USERNAME_FIELD in self.fields:
86 self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
87 {'autofocus': ''})
88
89 def save(self, request=None, commit=True):
90 user = super().save(commit=False)
91 password = self.cleaned_data['password']
92 user.set_password(password)
93 if commit:
94 user.save()
95 return user
96
97
98 class PasswordResetForm(django_forms.PasswordResetForm):
99 """Allow resetting passwords.
100
101 This subclass overrides sending emails to use templated email.
102 """
103
104 def get_users(self, email):
105 active_users = User.objects.filter(email__iexact=email, is_active=True)
106 return active_users
107
108 def send_mail(
109 self, subject_template_name, email_template_name, context,
110 from_email, to_email, html_email_template_name=None):
111 emails.send_password_reset_email.delay(context, to_email)
112
[end of saleor/account/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/account/forms.py b/saleor/account/forms.py
--- a/saleor/account/forms.py
+++ b/saleor/account/forms.py
@@ -70,6 +70,11 @@
class SignupForm(forms.ModelForm):
password = forms.CharField(
widget=forms.PasswordInput)
+ email = forms.EmailField(
+ error_messages={
+ 'unique': pgettext_lazy(
+ 'Registration error',
+ 'This email has already been registered.')})
class Meta:
model = User
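Because Django lets a form field's `error_messages` override the model-level message for the same error code, declaring the `email` field explicitly lets the signup form surface a sentence that translators can render naturally instead of the generated "User with this Email already exists.". A quick illustration (form data is made up):

```python
# Illustrative only: how the overridden message surfaces during validation.
form = SignupForm(data={"email": "already.taken@example.com", "password": "s3cret!"})
if not form.is_valid():
    # With the patch this prints the translatable
    # "This email has already been registered." for a duplicate address.
    print(form.errors["email"])
```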
|
{"golden_diff": "diff --git a/saleor/account/forms.py b/saleor/account/forms.py\n--- a/saleor/account/forms.py\n+++ b/saleor/account/forms.py\n@@ -70,6 +70,11 @@\n class SignupForm(forms.ModelForm):\n password = forms.CharField(\n widget=forms.PasswordInput)\n+ email = forms.EmailField(\n+ error_messages={\n+ 'unique': pgettext_lazy(\n+ 'Registration error',\n+ 'This email has already been registered.')})\n \n class Meta:\n model = User\n", "issue": "Localization of duplicate email account doesn't make sense\nI saw this weird thing in post production when I was creating a customer account that was already registered.\r\n\r\n\r\n\r\n\r\n\r\n---\r\n\r\nEdit: note, the untranslated string is ` User with this Email already exists. `\r\n\r\n---\r\n\r\nI'm not quite sure how to explain this, it doesn't make sense in other language, but make sense in English. It's understandable for a French or a Polish for example but doesn't make real sense it shouldn't be that way, it looks like a debug string (as a dev).\r\n\r\nI will wait to hear back from you. I also know it's not from your side, so I'm not quite sure of what you can do about this.\r\n\r\nThe error is generated by `django.db.models.base.Model#unique_error_message` (sourcing from `django.forms.models.BaseModelForm#_post_clean`).\r\n```python\r\n def unique_error_message(self, model_class, unique_check):\r\n opts = model_class._meta\r\n\r\n params = {\r\n 'model': self,\r\n 'model_class': model_class,\r\n 'model_name': capfirst(opts.verbose_name),\r\n 'unique_check': unique_check,\r\n }\r\n\r\n # A unique field\r\n if len(unique_check) == 1:\r\n field = opts.get_field(unique_check[0])\r\n params['field_label'] = capfirst(field.verbose_name)\r\n return ValidationError(\r\n message=field.error_messages['unique'],\r\n code='unique',\r\n params=params,\r\n )\r\n\r\n # unique_together\r\n else:\r\n field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]\r\n params['field_labels'] = get_text_list(field_labels, _('and'))\r\n return ValidationError(\r\n message=_(\"%(model_name)s with this %(field_labels)s already exists.\"),\r\n code='unique_together',\r\n params=params,\r\n )\r\n # ^^^^^^\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom .i18n import AddressMetaForm, get_address_form_class\nfrom . 
import emails\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(\n instance.country.code)\n address_form = address_form_class(\n data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm):\n password = forms.CharField(\n widget=forms.PasswordInput)\n\n class Meta:\n model = User\n fields = ('email',)\n labels = {\n 'email': pgettext_lazy(\n 'Email', 'Email'),\n 'password': pgettext_lazy(\n 'Password', 'Password')}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, to_email, html_email_template_name=None):\n emails.send_password_reset_email.delay(context, to_email)\n", "path": "saleor/account/forms.py"}]}
| 1,993 | 116 |
gh_patches_debug_17693
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1095
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Root Django URL doesn't appear in the UI when using routes
**Describe the bug**: the `transaction.name` field is missing from the `transaction` document
**To Reproduce**
1. set `DJANGO_TRANSACTION_NAME_FROM_ROUTE` to `True`
2. have a URL of `path("", TemplateView.as_view(template_name="pages/home.html"), name="home")`
3. ...
4. not profit
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.8
- Framework and version [e.g. Django 2.1]: Django 3.1.6
- APM Server version: 7.12
- Agent version: 6.1.1
</issue>
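A minimal reproduction of the setup described in the steps above might look like this (the option name comes from the issue; the service name and template path are placeholders):

```python
# settings.py -- enable route-based transaction names (assumed key spelling)
ELASTIC_APM = {
    "SERVICE_NAME": "demo-app",
    "DJANGO_TRANSACTION_NAME_FROM_ROUTE": True,
}

# urls.py -- the root URL pattern quoted in the issue
from django.urls import path
from django.views.generic import TemplateView

urlpatterns = [
    path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
]
```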
<code>
[start of elasticapm/contrib/django/middleware/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from __future__ import absolute_import
33
34 import logging
35 import threading
36
37 from django.apps import apps
38 from django.conf import settings as django_settings
39
40 import elasticapm
41 from elasticapm.conf import constants
42 from elasticapm.contrib.django.client import client, get_client
43 from elasticapm.utils import build_name_with_http_method_prefix, get_name_from_func, wrapt
44
45 try:
46 from importlib import import_module
47 except ImportError:
48 from django.utils.importlib import import_module
49
50 try:
51 from django.utils.deprecation import MiddlewareMixin
52 except ImportError:
53 # no-op class for Django < 1.10
54 class MiddlewareMixin(object):
55 pass
56
57
58 def _is_ignorable_404(uri):
59 """
60 Returns True if the given request *shouldn't* notify the site managers.
61 """
62 urls = getattr(django_settings, "IGNORABLE_404_URLS", ())
63 return any(pattern.search(uri) for pattern in urls)
64
65
66 class ElasticAPMClientMiddlewareMixin(object):
67 @property
68 def client(self):
69 try:
70 app = apps.get_app_config("elasticapm.contrib.django")
71 return app.client
72 except LookupError:
73 return get_client()
74
75
76 class Catch404Middleware(MiddlewareMixin, ElasticAPMClientMiddlewareMixin):
77 def process_response(self, request, response):
78 if response.status_code != 404 or _is_ignorable_404(request.get_full_path()):
79 return response
80 if django_settings.DEBUG and not self.client.config.debug:
81 return response
82 data = {"level": logging.INFO, "logger": "http404"}
83 result = self.client.capture(
84 "Message",
85 request=request,
86 param_message={"message": "Page Not Found: %s", "params": [request.build_absolute_uri()]},
87 logger_name="http404",
88 level=logging.INFO,
89 )
90 request._elasticapm = {"service_name": data.get("service_name", self.client.config.service_name), "id": result}
91 return response
92
93
94 def get_name_from_middleware(wrapped, instance):
95 name = [type(instance).__name__, wrapped.__name__]
96 if type(instance).__module__:
97 name = [type(instance).__module__] + name
98 return ".".join(name)
99
100
101 def process_request_wrapper(wrapped, instance, args, kwargs):
102 response = wrapped(*args, **kwargs)
103 try:
104 if response is not None:
105 request = args[0]
106 elasticapm.set_transaction_name(
107 build_name_with_http_method_prefix(get_name_from_middleware(wrapped, instance), request)
108 )
109 finally:
110 return response
111
112
113 def process_response_wrapper(wrapped, instance, args, kwargs):
114 response = wrapped(*args, **kwargs)
115 try:
116 request, original_response = args
117 # if there's no view_func on the request, and this middleware created
118 # a new response object, it's logged as the responsible transaction
119 # name
120 if not hasattr(request, "_elasticapm_view_func") and response is not original_response:
121 elasticapm.set_transaction_name(
122 build_name_with_http_method_prefix(get_name_from_middleware(wrapped, instance), request)
123 )
124 finally:
125 return response
126
127
128 class TracingMiddleware(MiddlewareMixin, ElasticAPMClientMiddlewareMixin):
129 _elasticapm_instrumented = False
130 _instrumenting_lock = threading.Lock()
131
132 def __init__(self, *args, **kwargs):
133 super(TracingMiddleware, self).__init__(*args, **kwargs)
134 if not self._elasticapm_instrumented:
135 with self._instrumenting_lock:
136 if not self._elasticapm_instrumented:
137 if self.client.config.instrument_django_middleware:
138 self.instrument_middlewares()
139
140 TracingMiddleware._elasticapm_instrumented = True
141
142 def instrument_middlewares(self):
143 middlewares = getattr(django_settings, "MIDDLEWARE", None) or getattr(
144 django_settings, "MIDDLEWARE_CLASSES", None
145 )
146 if middlewares:
147 for middleware_path in middlewares:
148 module_path, class_name = middleware_path.rsplit(".", 1)
149 try:
150 module = import_module(module_path)
151 middleware_class = getattr(module, class_name)
152 if middleware_class == type(self):
153 # don't instrument ourselves
154 continue
155 if hasattr(middleware_class, "process_request"):
156 wrapt.wrap_function_wrapper(middleware_class, "process_request", process_request_wrapper)
157 if hasattr(middleware_class, "process_response"):
158 wrapt.wrap_function_wrapper(middleware_class, "process_response", process_response_wrapper)
159 except ImportError:
160 client.logger.warning("Can't instrument middleware %s", middleware_path)
161
162 def process_view(self, request, view_func, view_args, view_kwargs):
163 request._elasticapm_view_func = view_func
164
165 def process_response(self, request, response):
166 if django_settings.DEBUG and not self.client.config.debug:
167 return response
168 try:
169 if hasattr(response, "status_code"):
170 transaction_name = None
171 if self.client.config.django_transaction_name_from_route and hasattr(request.resolver_match, "route"):
172 transaction_name = request.resolver_match.route
173 elif getattr(request, "_elasticapm_view_func", False):
174 transaction_name = get_name_from_func(request._elasticapm_view_func)
175 if transaction_name:
176 transaction_name = build_name_with_http_method_prefix(transaction_name, request)
177 elasticapm.set_transaction_name(transaction_name, override=False)
178
179 elasticapm.set_context(
180 lambda: self.client.get_data_from_request(request, constants.TRANSACTION), "request"
181 )
182 elasticapm.set_context(
183 lambda: self.client.get_data_from_response(response, constants.TRANSACTION), "response"
184 )
185 elasticapm.set_context(lambda: self.client.get_user_info(request), "user")
186 elasticapm.set_transaction_result("HTTP {}xx".format(response.status_code // 100), override=False)
187 elasticapm.set_transaction_outcome(http_status_code=response.status_code, override=False)
188 except Exception:
189 self.client.error_logger.error("Exception during timing of request", exc_info=True)
190 return response
191
192
193 class ErrorIdMiddleware(MiddlewareMixin):
194 """
195 Appends the X-ElasticAPM-ErrorId response header for referencing a message within
196 the ElasticAPM datastore.
197 """
198
199 def process_response(self, request, response):
200 if not getattr(request, "_elasticapm", None):
201 return response
202 response["X-ElasticAPM-ErrorId"] = request._elasticapm["id"]
203 return response
204
205
206 class LogMiddleware(MiddlewareMixin):
207 # Create a thread local variable to store the session in for logging
208 thread = threading.local()
209
210 def process_request(self, request):
211 self.thread.request = request
212
[end of elasticapm/contrib/django/middleware/__init__.py]
</code>
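
Before the patch, it helps to see why the root URL in particular loses its name. The sketch below is a hedged illustration (it assumes the urls.py sketched earlier and a configured Django settings module): for `path("")`, `ResolverMatch.route` is the empty string, which is falsy, so the `if transaction_name:` check in `TracingMiddleware.process_response` above never calls `elasticapm.set_transaction_name()`.

# sketch: observing the falsy route for the root URL (assumes the urls.py above)
from django.urls import resolve

match = resolve("/")
print(repr(match.route))     # '' -- empty route pattern for path("")
print(repr(match.url_name))  # 'home'

# In process_response, transaction_name is set to match.route, i.e. "",
# which is falsy, so set_transaction_name() is skipped and the transaction
# document is left without a name.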
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/contrib/django/middleware/__init__.py b/elasticapm/contrib/django/middleware/__init__.py
--- a/elasticapm/contrib/django/middleware/__init__.py
+++ b/elasticapm/contrib/django/middleware/__init__.py
@@ -169,7 +169,9 @@
if hasattr(response, "status_code"):
transaction_name = None
if self.client.config.django_transaction_name_from_route and hasattr(request.resolver_match, "route"):
- transaction_name = request.resolver_match.route
+ r = request.resolver_match
+ # if no route is defined (e.g. for the root URL), fall back on url_name and then function name
+ transaction_name = r.route or r.url_name or get_name_from_func(r.func)
elif getattr(request, "_elasticapm_view_func", False):
transaction_name = get_name_from_func(request._elasticapm_view_func)
if transaction_name:
|
{"golden_diff": "diff --git a/elasticapm/contrib/django/middleware/__init__.py b/elasticapm/contrib/django/middleware/__init__.py\n--- a/elasticapm/contrib/django/middleware/__init__.py\n+++ b/elasticapm/contrib/django/middleware/__init__.py\n@@ -169,7 +169,9 @@\n if hasattr(response, \"status_code\"):\n transaction_name = None\n if self.client.config.django_transaction_name_from_route and hasattr(request.resolver_match, \"route\"):\n- transaction_name = request.resolver_match.route\n+ r = request.resolver_match\n+ # if no route is defined (e.g. for the root URL), fall back on url_name and then function name\n+ transaction_name = r.route or r.url_name or get_name_from_func(r.func)\n elif getattr(request, \"_elasticapm_view_func\", False):\n transaction_name = get_name_from_func(request._elasticapm_view_func)\n if transaction_name:\n", "issue": "Root Django URL doesn't appear in the UI when using routes\n**Describe the bug**: `transaction.name` field is missing from the `transcction` document\r\n\r\n**To Reproduce**\r\n\r\n1. set `DJANGO_TRANSACTION_NAME_FROM_ROUTE` to `True`\r\n2. have url of `path(\"\", TemplateView.as_view(template_name=\"pages/home.html\"), name=\"home\")`\r\n3. ...\r\n4. not profit\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.8\r\n- Framework and version [e.g. Django 2.1]: Django 3.1.6\r\n- APM Server version: 7.12\r\n- Agent version:6.1.1\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nimport logging\nimport threading\n\nfrom django.apps import apps\nfrom django.conf import settings as django_settings\n\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.client import client, get_client\nfrom elasticapm.utils import build_name_with_http_method_prefix, get_name_from_func, wrapt\n\ntry:\n from importlib import import_module\nexcept ImportError:\n from django.utils.importlib import import_module\n\ntry:\n from django.utils.deprecation import MiddlewareMixin\nexcept ImportError:\n # no-op class for Django < 1.10\n class MiddlewareMixin(object):\n pass\n\n\ndef _is_ignorable_404(uri):\n \"\"\"\n Returns True if the given request *shouldn't* notify the site managers.\n \"\"\"\n urls = getattr(django_settings, \"IGNORABLE_404_URLS\", ())\n return any(pattern.search(uri) for pattern in urls)\n\n\nclass ElasticAPMClientMiddlewareMixin(object):\n @property\n def client(self):\n try:\n app = apps.get_app_config(\"elasticapm.contrib.django\")\n return app.client\n except LookupError:\n return get_client()\n\n\nclass Catch404Middleware(MiddlewareMixin, ElasticAPMClientMiddlewareMixin):\n def process_response(self, request, response):\n if response.status_code != 404 or _is_ignorable_404(request.get_full_path()):\n return response\n if django_settings.DEBUG and not self.client.config.debug:\n return response\n data = {\"level\": logging.INFO, \"logger\": \"http404\"}\n result = self.client.capture(\n \"Message\",\n request=request,\n param_message={\"message\": \"Page Not Found: %s\", \"params\": [request.build_absolute_uri()]},\n logger_name=\"http404\",\n level=logging.INFO,\n )\n request._elasticapm = {\"service_name\": data.get(\"service_name\", self.client.config.service_name), \"id\": result}\n return response\n\n\ndef get_name_from_middleware(wrapped, instance):\n name = [type(instance).__name__, wrapped.__name__]\n if type(instance).__module__:\n name = [type(instance).__module__] + name\n return \".\".join(name)\n\n\ndef process_request_wrapper(wrapped, instance, args, kwargs):\n response = wrapped(*args, **kwargs)\n try:\n if response is not None:\n request = args[0]\n elasticapm.set_transaction_name(\n build_name_with_http_method_prefix(get_name_from_middleware(wrapped, instance), request)\n )\n finally:\n return response\n\n\ndef process_response_wrapper(wrapped, instance, args, kwargs):\n response = wrapped(*args, **kwargs)\n try:\n request, original_response = args\n # if there's no view_func on the request, and this middleware created\n # a new response object, it's logged as the responsible transaction\n # name\n if not hasattr(request, \"_elasticapm_view_func\") and response is not original_response:\n elasticapm.set_transaction_name(\n build_name_with_http_method_prefix(get_name_from_middleware(wrapped, instance), request)\n )\n finally:\n return response\n\n\nclass TracingMiddleware(MiddlewareMixin, ElasticAPMClientMiddlewareMixin):\n _elasticapm_instrumented = False\n _instrumenting_lock = threading.Lock()\n\n def __init__(self, 
*args, **kwargs):\n super(TracingMiddleware, self).__init__(*args, **kwargs)\n if not self._elasticapm_instrumented:\n with self._instrumenting_lock:\n if not self._elasticapm_instrumented:\n if self.client.config.instrument_django_middleware:\n self.instrument_middlewares()\n\n TracingMiddleware._elasticapm_instrumented = True\n\n def instrument_middlewares(self):\n middlewares = getattr(django_settings, \"MIDDLEWARE\", None) or getattr(\n django_settings, \"MIDDLEWARE_CLASSES\", None\n )\n if middlewares:\n for middleware_path in middlewares:\n module_path, class_name = middleware_path.rsplit(\".\", 1)\n try:\n module = import_module(module_path)\n middleware_class = getattr(module, class_name)\n if middleware_class == type(self):\n # don't instrument ourselves\n continue\n if hasattr(middleware_class, \"process_request\"):\n wrapt.wrap_function_wrapper(middleware_class, \"process_request\", process_request_wrapper)\n if hasattr(middleware_class, \"process_response\"):\n wrapt.wrap_function_wrapper(middleware_class, \"process_response\", process_response_wrapper)\n except ImportError:\n client.logger.warning(\"Can't instrument middleware %s\", middleware_path)\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n request._elasticapm_view_func = view_func\n\n def process_response(self, request, response):\n if django_settings.DEBUG and not self.client.config.debug:\n return response\n try:\n if hasattr(response, \"status_code\"):\n transaction_name = None\n if self.client.config.django_transaction_name_from_route and hasattr(request.resolver_match, \"route\"):\n transaction_name = request.resolver_match.route\n elif getattr(request, \"_elasticapm_view_func\", False):\n transaction_name = get_name_from_func(request._elasticapm_view_func)\n if transaction_name:\n transaction_name = build_name_with_http_method_prefix(transaction_name, request)\n elasticapm.set_transaction_name(transaction_name, override=False)\n\n elasticapm.set_context(\n lambda: self.client.get_data_from_request(request, constants.TRANSACTION), \"request\"\n )\n elasticapm.set_context(\n lambda: self.client.get_data_from_response(response, constants.TRANSACTION), \"response\"\n )\n elasticapm.set_context(lambda: self.client.get_user_info(request), \"user\")\n elasticapm.set_transaction_result(\"HTTP {}xx\".format(response.status_code // 100), override=False)\n elasticapm.set_transaction_outcome(http_status_code=response.status_code, override=False)\n except Exception:\n self.client.error_logger.error(\"Exception during timing of request\", exc_info=True)\n return response\n\n\nclass ErrorIdMiddleware(MiddlewareMixin):\n \"\"\"\n Appends the X-ElasticAPM-ErrorId response header for referencing a message within\n the ElasticAPM datastore.\n \"\"\"\n\n def process_response(self, request, response):\n if not getattr(request, \"_elasticapm\", None):\n return response\n response[\"X-ElasticAPM-ErrorId\"] = request._elasticapm[\"id\"]\n return response\n\n\nclass LogMiddleware(MiddlewareMixin):\n # Create a thread local variable to store the session in for logging\n thread = threading.local()\n\n def process_request(self, request):\n self.thread.request = request\n", "path": "elasticapm/contrib/django/middleware/__init__.py"}]}
| 3,049 | 219 |
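
As a hedged illustration of the fallback chain the golden diff introduces (`r.route or r.url_name or get_name_from_func(r.func)`), the ordering can be exercised in isolation; the example values below are assumptions for demonstration, not data from this record.

# illustrative-only: mirrors the golden diff's fallback expression
def pick_transaction_name(route, url_name, func_name):
    return route or url_name or func_name

assert pick_transaction_name("users/<int:pk>/", "user-detail", "views.user_detail") == "users/<int:pk>/"
assert pick_transaction_name("", "home", "TemplateView") == "home"            # root URL with a named pattern
assert pick_transaction_name("", None, "TemplateView") == "TemplateView"      # unnamed root URL falls back to the view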